1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
4 * Author: Joerg Roedel <jroedel@suse.de>
5 * Leo Duran <leo.duran@amd.com>
6 */
7
8 #define pr_fmt(fmt) "AMD-Vi: " fmt
9 #define dev_fmt(fmt) pr_fmt(fmt)
10
11 #include <linux/pci.h>
12 #include <linux/acpi.h>
13 #include <linux/list.h>
14 #include <linux/bitmap.h>
15 #include <linux/slab.h>
16 #include <linux/syscore_ops.h>
17 #include <linux/interrupt.h>
18 #include <linux/msi.h>
19 #include <linux/irq.h>
20 #include <linux/amd-iommu.h>
21 #include <linux/export.h>
22 #include <linux/kmemleak.h>
23 #include <linux/cc_platform.h>
24 #include <linux/iopoll.h>
25 #include <asm/pci-direct.h>
26 #include <asm/iommu.h>
27 #include <asm/apic.h>
28 #include <asm/gart.h>
29 #include <asm/x86_init.h>
30 #include <asm/io_apic.h>
31 #include <asm/irq_remapping.h>
32 #include <asm/set_memory.h>
33
34 #include <linux/crash_dump.h>
35
36 #include "amd_iommu.h"
37 #include "../irq_remapping.h"
38
39 /*
40 * definitions for the ACPI scanning code
41 */
42 #define IVRS_HEADER_LENGTH 48
43
44 #define ACPI_IVHD_TYPE_MAX_SUPPORTED 0x40
45 #define ACPI_IVMD_TYPE_ALL 0x20
46 #define ACPI_IVMD_TYPE 0x21
47 #define ACPI_IVMD_TYPE_RANGE 0x22
48
49 #define IVHD_DEV_ALL 0x01
50 #define IVHD_DEV_SELECT 0x02
51 #define IVHD_DEV_SELECT_RANGE_START 0x03
52 #define IVHD_DEV_RANGE_END 0x04
53 #define IVHD_DEV_ALIAS 0x42
54 #define IVHD_DEV_ALIAS_RANGE 0x43
55 #define IVHD_DEV_EXT_SELECT 0x46
56 #define IVHD_DEV_EXT_SELECT_RANGE 0x47
57 #define IVHD_DEV_SPECIAL 0x48
58 #define IVHD_DEV_ACPI_HID 0xf0
59
60 #define UID_NOT_PRESENT 0
61 #define UID_IS_INTEGER 1
62 #define UID_IS_CHARACTER 2
63
64 #define IVHD_SPECIAL_IOAPIC 1
65 #define IVHD_SPECIAL_HPET 2
66
67 #define IVHD_FLAG_HT_TUN_EN_MASK 0x01
68 #define IVHD_FLAG_PASSPW_EN_MASK 0x02
69 #define IVHD_FLAG_RESPASSPW_EN_MASK 0x04
70 #define IVHD_FLAG_ISOC_EN_MASK 0x08
71
72 #define IVMD_FLAG_EXCL_RANGE 0x08
73 #define IVMD_FLAG_IW 0x04
74 #define IVMD_FLAG_IR 0x02
75 #define IVMD_FLAG_UNITY_MAP 0x01
76
77 #define ACPI_DEVFLAG_INITPASS 0x01
78 #define ACPI_DEVFLAG_EXTINT 0x02
79 #define ACPI_DEVFLAG_NMI 0x04
80 #define ACPI_DEVFLAG_SYSMGT1 0x10
81 #define ACPI_DEVFLAG_SYSMGT2 0x20
82 #define ACPI_DEVFLAG_LINT0 0x40
83 #define ACPI_DEVFLAG_LINT1 0x80
84 #define ACPI_DEVFLAG_ATSDIS 0x10000000
85
86 #define LOOP_TIMEOUT 2000000
87 /*
88 * ACPI table definitions
89 *
90 * These data structures are laid over the table to parse the important values
91 * out of it.
92 */
93
94 extern const struct iommu_ops amd_iommu_ops;
95
96 /*
97 * structure describing one IOMMU in the ACPI table. Typically followed by one
98 * or more ivhd_entry structures.
99 */
100 struct ivhd_header {
101 u8 type;
102 u8 flags;
103 u16 length;
104 u16 devid;
105 u16 cap_ptr;
106 u64 mmio_phys;
107 u16 pci_seg;
108 u16 info;
109 u32 efr_attr;
110
111 /* Following only valid on IVHD type 11h and 40h */
112 u64 efr_reg; /* Exact copy of MMIO_EXT_FEATURES */
113 u64 res;
114 } __attribute__((packed));
115
116 /*
117 * A device entry describing which devices a specific IOMMU translates and
118 * which requestor ids they use.
119 */
120 struct ivhd_entry {
121 u8 type;
122 u16 devid;
123 u8 flags;
124 struct_group(ext_hid,
125 u32 ext;
126 u32 hidh;
127 );
128 u64 cid;
129 u8 uidf;
130 u8 uidl;
131 u8 uid;
132 } __attribute__((packed));
133
134 /*
135 * An AMD IOMMU memory definition structure. It defines things like exclusion
136 * ranges for devices and regions that should be unity mapped.
137 */
138 struct ivmd_header {
139 u8 type;
140 u8 flags;
141 u16 length;
142 u16 devid;
143 u16 aux;
144 u64 resv;
145 u64 range_start;
146 u64 range_length;
147 } __attribute__((packed));
148
149 bool amd_iommu_dump;
150 bool amd_iommu_irq_remap __read_mostly;
151
152 enum io_pgtable_fmt amd_iommu_pgtable = AMD_IOMMU_V1;
153
154 int amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC;
155 static int amd_iommu_xt_mode = IRQ_REMAP_XAPIC_MODE;
156
157 static bool amd_iommu_detected;
158 static bool amd_iommu_disabled __initdata;
159 static bool amd_iommu_force_enable __initdata;
160 static int amd_iommu_target_ivhd_type;
161
162 u16 amd_iommu_last_bdf; /* largest PCI device id we have
163 to handle */
164 LIST_HEAD(amd_iommu_unity_map); /* a list of required unity mappings
165 we find in ACPI */
166
167 LIST_HEAD(amd_iommu_list); /* list of all AMD IOMMUs in the
168 system */
169
170 /* Array to assign indices to IOMMUs */
171 struct amd_iommu *amd_iommus[MAX_IOMMUS];
172
173 /* Number of IOMMUs present in the system */
174 static int amd_iommus_present;
175
176 /* IOMMUs have a non-present cache? */
177 bool amd_iommu_np_cache __read_mostly;
178 bool amd_iommu_iotlb_sup __read_mostly = true;
179
180 u32 amd_iommu_max_pasid __read_mostly = ~0;
181
182 bool amd_iommu_v2_present __read_mostly;
183 static bool amd_iommu_pc_present __read_mostly;
184 bool amdr_ivrs_remap_support __read_mostly;
185
186 bool amd_iommu_force_isolation __read_mostly;
187
188 /*
189 * Pointer to the device table which is shared by all AMD IOMMUs.
190 * It is indexed by the PCI device id or the HT unit id and contains
191 * information about the domain the device belongs to as well as the
192 * page table root pointer.
193 */
194 struct dev_table_entry *amd_iommu_dev_table;
195 /*
196 * Pointer to a device table to which the contents of the old device
197 * table will be copied. It is only used in the kdump kernel.
198 */
199 static struct dev_table_entry *old_dev_tbl_cpy;
200
201 /*
202 * The alias table is a driver specific data structure which contains the
203 * mappings of the PCI device ids to the actual requestor ids on the IOMMU.
204 * More than one device can share the same requestor id.
205 */
206 u16 *amd_iommu_alias_table;
207
208 /*
209 * The rlookup table is used to find the IOMMU which is responsible
210 * for a specific device. It is also indexed by the PCI device id.
211 */
212 struct amd_iommu **amd_iommu_rlookup_table;
213
214 /*
215 * This table is used to find the irq remapping table for a given device id
216 * quickly.
217 */
218 struct irq_remap_table **irq_lookup_table;
219
220 /*
221 * AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap
222 * to know which ones are already in use.
223 */
224 unsigned long *amd_iommu_pd_alloc_bitmap;
225
226 static u32 dev_table_size; /* size of the device table */
227 static u32 alias_table_size; /* size of the alias table */
228 static u32 rlookup_table_size; /* size of the rlookup table */
229
230 enum iommu_init_state {
231 IOMMU_START_STATE,
232 IOMMU_IVRS_DETECTED,
233 IOMMU_ACPI_FINISHED,
234 IOMMU_ENABLED,
235 IOMMU_PCI_INIT,
236 IOMMU_INTERRUPTS_EN,
237 IOMMU_INITIALIZED,
238 IOMMU_NOT_FOUND,
239 IOMMU_INIT_ERROR,
240 IOMMU_CMDLINE_DISABLED,
241 };
242
243 /* Early ioapic and hpet maps from kernel command line */
244 #define EARLY_MAP_SIZE 4
245 static struct devid_map __initdata early_ioapic_map[EARLY_MAP_SIZE];
246 static struct devid_map __initdata early_hpet_map[EARLY_MAP_SIZE];
247 static struct acpihid_map_entry __initdata early_acpihid_map[EARLY_MAP_SIZE];
248
249 static int __initdata early_ioapic_map_size;
250 static int __initdata early_hpet_map_size;
251 static int __initdata early_acpihid_map_size;
252
253 static bool __initdata cmdline_maps;
254
255 static enum iommu_init_state init_state = IOMMU_START_STATE;
256
257 static int amd_iommu_enable_interrupts(void);
258 static int __init iommu_go_to_state(enum iommu_init_state state);
259 static void init_device_table_dma(void);
260
261 static bool amd_iommu_pre_enabled = true;
262
263 static u32 amd_iommu_ivinfo __initdata;
264
265 bool translation_pre_enabled(struct amd_iommu *iommu)
266 {
267 return (iommu->flags & AMD_IOMMU_FLAG_TRANS_PRE_ENABLED);
268 }
269
270 static void clear_translation_pre_enabled(struct amd_iommu *iommu)
271 {
272 iommu->flags &= ~AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;
273 }
274
275 static void init_translation_status(struct amd_iommu *iommu)
276 {
277 u64 ctrl;
278
279 ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
280 if (ctrl & (1<<CONTROL_IOMMU_EN))
281 iommu->flags |= AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;
282 }
283
284 static inline void update_last_devid(u16 devid)
285 {
286 if (devid > amd_iommu_last_bdf)
287 amd_iommu_last_bdf = devid;
288 }
289
290 static inline unsigned long tbl_size(int entry_size)
291 {
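/*
 * The shared per-device tables need one entry for every device id up to
 * amd_iommu_last_bdf; round the allocation up to a power-of-two number
 * of pages.
 */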
292 unsigned shift = PAGE_SHIFT +
293 get_order(((int)amd_iommu_last_bdf + 1) * entry_size);
294
295 return 1UL << shift;
296 }
297
298 int amd_iommu_get_num_iommus(void)
299 {
300 return amd_iommus_present;
301 }
302
303 #ifdef CONFIG_IRQ_REMAP
304 static bool check_feature_on_all_iommus(u64 mask)
305 {
306 bool ret = false;
307 struct amd_iommu *iommu;
308
309 for_each_iommu(iommu) {
310 ret = iommu_feature(iommu, mask);
311 if (!ret)
312 return false;
313 }
314
315 return true;
316 }
317 #endif
318
319 /*
320 * For IVHD type 0x11/0x40, EFR is also available via IVHD.
321 * Default to IVHD EFR since it is available sooner
322 * (i.e. before PCI init).
323 */
324 static void __init early_iommu_features_init(struct amd_iommu *iommu,
325 struct ivhd_header *h)
326 {
327 if (amd_iommu_ivinfo & IOMMU_IVINFO_EFRSUP)
328 iommu->features = h->efr_reg;
329 if (amd_iommu_ivinfo & IOMMU_IVINFO_DMA_REMAP)
330 amdr_ivrs_remap_support = true;
331 }
332
333 /* Access to l1 and l2 indexed register spaces */
334
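/*
 * Both spaces are reached through indirect address/data register pairs in
 * the IOMMU's PCI config space: 0xf8/0xfc for the L1 space (L1 bus number
 * in the upper half of the address word, bit 31 enables writes) and
 * 0xf0/0xf4 for the L2 space (bit 8 enables writes).
 */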
335 static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address)
336 {
337 u32 val;
338
339 pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
340 pci_read_config_dword(iommu->dev, 0xfc, &val);
341 return val;
342 }
343
344 static void iommu_write_l1(struct amd_iommu *iommu, u16 l1, u8 address, u32 val)
345 {
346 pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16 | 1 << 31));
347 pci_write_config_dword(iommu->dev, 0xfc, val);
348 pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
349 }
350
351 static u32 iommu_read_l2(struct amd_iommu *iommu, u8 address)
352 {
353 u32 val;
354
355 pci_write_config_dword(iommu->dev, 0xf0, address);
356 pci_read_config_dword(iommu->dev, 0xf4, &val);
357 return val;
358 }
359
360 static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val)
361 {
362 pci_write_config_dword(iommu->dev, 0xf0, (address | 1 << 8));
363 pci_write_config_dword(iommu->dev, 0xf4, val);
364 }
365
366 /****************************************************************************
367 *
368 * AMD IOMMU MMIO register space handling functions
369 *
370 * These functions are used to program the IOMMU device registers in
371 * MMIO space required by this driver.
372 *
373 ****************************************************************************/
374
375 /*
376 * This function sets the exclusion range in the IOMMU. DMA accesses to the
377 * exclusion range are passed through untranslated.
378 */
379 static void iommu_set_exclusion_range(struct amd_iommu *iommu)
380 {
381 u64 start = iommu->exclusion_start & PAGE_MASK;
382 u64 limit = (start + iommu->exclusion_length - 1) & PAGE_MASK;
383 u64 entry;
384
385 if (!iommu->exclusion_start)
386 return;
387
388 entry = start | MMIO_EXCL_ENABLE_MASK;
389 memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET,
390 &entry, sizeof(entry));
391
392 entry = limit;
393 memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET,
394 &entry, sizeof(entry));
395 }
396
397 static void iommu_set_cwwb_range(struct amd_iommu *iommu)
398 {
399 u64 start = iommu_virt_to_phys((void *)iommu->cmd_sem);
400 u64 entry = start & PM_ADDR_MASK;
401
402 if (!iommu_feature(iommu, FEATURE_SNP))
403 return;
404
405 /* Note:
406 * Re-purpose Exclusion base/limit registers for Completion wait
407 * write-back base/limit.
408 */
409 memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET,
410 &entry, sizeof(entry));
411
412 /* Note:
413 * Default to 4 Kbytes, which can be specified by setting base
414 * address equal to the limit address.
415 */
416 memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET,
417 &entry, sizeof(entry));
418 }
419
420 /* Programs the physical address of the device table into the IOMMU hardware */
421 static void iommu_set_device_table(struct amd_iommu *iommu)
422 {
423 u64 entry;
424
425 BUG_ON(iommu->mmio_base == NULL);
426
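/*
 * The register takes the physical base address of the device table with
 * its size encoded in the low bits as (number of 4K pages - 1).
 */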
427 entry = iommu_virt_to_phys(amd_iommu_dev_table);
428 entry |= (dev_table_size >> 12) - 1;
429 memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET,
430 &entry, sizeof(entry));
431 }
432
433 /* Generic functions to enable/disable certain features of the IOMMU. */
434 static void iommu_feature_enable(struct amd_iommu *iommu, u8 bit)
435 {
436 u64 ctrl;
437
438 ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
439 ctrl |= (1ULL << bit);
440 writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
441 }
442
443 static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
444 {
445 u64 ctrl;
446
447 ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
448 ctrl &= ~(1ULL << bit);
449 writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
450 }
451
452 static void iommu_set_inv_tlb_timeout(struct amd_iommu *iommu, int timeout)
453 {
454 u64 ctrl;
455
456 ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
457 ctrl &= ~CTRL_INV_TO_MASK;
458 ctrl |= (timeout << CONTROL_INV_TIMEOUT) & CTRL_INV_TO_MASK;
459 writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
460 }
461
462 /* Function to enable the hardware */
463 static void iommu_enable(struct amd_iommu *iommu)
464 {
465 iommu_feature_enable(iommu, CONTROL_IOMMU_EN);
466 }
467
468 static void iommu_disable(struct amd_iommu *iommu)
469 {
470 if (!iommu->mmio_base)
471 return;
472
473 /* Disable command buffer */
474 iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
475
476 /* Disable event logging and event interrupts */
477 iommu_feature_disable(iommu, CONTROL_EVT_INT_EN);
478 iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);
479
480 /* Disable IOMMU GA_LOG */
481 iommu_feature_disable(iommu, CONTROL_GALOG_EN);
482 iommu_feature_disable(iommu, CONTROL_GAINT_EN);
483
484 /* Disable IOMMU hardware itself */
485 iommu_feature_disable(iommu, CONTROL_IOMMU_EN);
486 }
487
488 /*
489 * mapping and unmapping functions for the IOMMU MMIO space. Each AMD IOMMU in
490 * the system has one.
491 */
492 static u8 __iomem * __init iommu_map_mmio_space(u64 address, u64 end)
493 {
494 if (!request_mem_region(address, end, "amd_iommu")) {
495 pr_err("Can not reserve memory region %llx-%llx for mmio\n",
496 address, end);
497 pr_err("This is a BIOS bug. Please contact your hardware vendor\n");
498 return NULL;
499 }
500
501 return (u8 __iomem *)ioremap(address, end);
502 }
503
504 static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu)
505 {
506 if (iommu->mmio_base)
507 iounmap(iommu->mmio_base);
508 release_mem_region(iommu->mmio_phys, iommu->mmio_phys_end);
509 }
510
511 static inline u32 get_ivhd_header_size(struct ivhd_header *h)
512 {
513 u32 size = 0;
514
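/*
 * IVHD type 10h uses the legacy 24-byte header; types 11h and 40h extend
 * it to 40 bytes to carry the EFR copy (see struct ivhd_header above).
 */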
515 switch (h->type) {
516 case 0x10:
517 size = 24;
518 break;
519 case 0x11:
520 case 0x40:
521 size = 40;
522 break;
523 }
524 return size;
525 }
526
527 /****************************************************************************
528 *
529 * The functions below belong to the first pass of AMD IOMMU ACPI table
530 * parsing. In this pass we try to find out the highest device id this
531 * code has to handle. Upon this information the size of the shared data
532 * structures is determined later.
533 *
534 ****************************************************************************/
535
536 /*
537 * This function calculates the length of a given IVHD entry
538 */
539 static inline int ivhd_entry_length(u8 *ivhd)
540 {
541 u32 type = ((struct ivhd_entry *)ivhd)->type;
542
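/*
 * Entry types below 0x80 have a fixed length of 4, 8, 16 or 32 bytes,
 * encoded in the two most significant bits of the type field.
 */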
543 if (type < 0x80) {
544 return 0x04 << (*ivhd >> 6);
545 } else if (type == IVHD_DEV_ACPI_HID) {
546 /* For ACPI_HID, offset 21 is uid len */
547 return *((u8 *)ivhd + 21) + 22;
548 }
549 return 0;
550 }
551
552 /*
553 * After reading the highest device id from the IOMMU PCI capability header
554 * this function checks whether a higher device id is defined in the ACPI table.
555 */
556 static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
557 {
558 u8 *p = (void *)h, *end = (void *)h;
559 struct ivhd_entry *dev;
560
561 u32 ivhd_size = get_ivhd_header_size(h);
562
563 if (!ivhd_size) {
564 pr_err("Unsupported IVHD type %#x\n", h->type);
565 return -EINVAL;
566 }
567
568 p += ivhd_size;
569 end += h->length;
570
571 while (p < end) {
572 dev = (struct ivhd_entry *)p;
573 switch (dev->type) {
574 case IVHD_DEV_ALL:
575 /* Use maximum BDF value for DEV_ALL */
576 update_last_devid(0xffff);
577 break;
578 case IVHD_DEV_SELECT:
579 case IVHD_DEV_RANGE_END:
580 case IVHD_DEV_ALIAS:
581 case IVHD_DEV_EXT_SELECT:
582 /* all the above subfield types refer to device ids */
583 update_last_devid(dev->devid);
584 break;
585 default:
586 break;
587 }
588 p += ivhd_entry_length(p);
589 }
590
591 WARN_ON(p != end);
592
593 return 0;
594 }
595
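/*
 * The IVRS checksum covers the whole table; it is valid when all bytes
 * sum to zero (mod 256).
 */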
596 static int __init check_ivrs_checksum(struct acpi_table_header *table)
597 {
598 int i;
599 u8 checksum = 0, *p = (u8 *)table;
600
601 for (i = 0; i < table->length; ++i)
602 checksum += p[i];
603 if (checksum != 0) {
604 /* ACPI table corrupt */
605 pr_err(FW_BUG "IVRS invalid checksum\n");
606 return -ENODEV;
607 }
608
609 return 0;
610 }
611
612 /*
613 * Iterate over all IVHD entries in the ACPI table and find the highest device
614 * id which we need to handle. This is the first of three functions which parse
615 * the ACPI table; the table checksum is verified separately in check_ivrs_checksum().
616 */
617 static int __init find_last_devid_acpi(struct acpi_table_header *table)
618 {
619 u8 *p = (u8 *)table, *end = (u8 *)table;
620 struct ivhd_header *h;
621
622 p += IVRS_HEADER_LENGTH;
623
624 end += table->length;
625 while (p < end) {
626 h = (struct ivhd_header *)p;
627 if (h->type == amd_iommu_target_ivhd_type) {
628 int ret = find_last_devid_from_ivhd(h);
629
630 if (ret)
631 return ret;
632 }
633 p += h->length;
634 }
635 WARN_ON(p != end);
636
637 return 0;
638 }
639
640 /****************************************************************************
641 *
642 * The following functions belong to the code path which parses the ACPI table
643 * the second time. In this ACPI parsing iteration we allocate IOMMU specific
644 * data structures, initialize the device/alias/rlookup table and also
645 * basically initialize the hardware.
646 *
647 ****************************************************************************/
648
649 /*
650 * Allocates the command buffer. This buffer is per AMD IOMMU. We can
651 * write commands to that buffer later and the IOMMU will execute them
652 * asynchronously.
653 */
654 static int __init alloc_command_buffer(struct amd_iommu *iommu)
655 {
656 iommu->cmd_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
657 get_order(CMD_BUFFER_SIZE));
658
659 return iommu->cmd_buf ? 0 : -ENOMEM;
660 }
661
662 /*
663 * This function restarts event logging in case the IOMMU experienced
664 * an event log buffer overflow.
665 */
666 void amd_iommu_restart_event_logging(struct amd_iommu *iommu)
667 {
668 iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);
669 iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
670 }
671
672 /*
673 * This function resets the command buffer if the IOMMU stopped fetching
674 * commands from it.
675 */
676 static void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu)
677 {
678 iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
679
680 writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
681 writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
682 iommu->cmd_buf_head = 0;
683 iommu->cmd_buf_tail = 0;
684
685 iommu_feature_enable(iommu, CONTROL_CMDBUF_EN);
686 }
687
688 /*
689 * This function writes the command buffer address to the hardware and
690 * enables it.
691 */
692 static void iommu_enable_command_buffer(struct amd_iommu *iommu)
693 {
694 u64 entry;
695
696 BUG_ON(iommu->cmd_buf == NULL);
697
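/*
 * The command buffer base register takes the physical base address with
 * the buffer length encoded alongside it; MMIO_CMD_SIZE_512 selects a
 * buffer of 512 command entries.
 */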
698 entry = iommu_virt_to_phys(iommu->cmd_buf);
699 entry |= MMIO_CMD_SIZE_512;
700
701 memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
702 &entry, sizeof(entry));
703
704 amd_iommu_reset_cmd_buffer(iommu);
705 }
706
707 /*
708 * This function disables the command buffer
709 */
710 static void iommu_disable_command_buffer(struct amd_iommu *iommu)
711 {
712 iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
713 }
714
715 static void __init free_command_buffer(struct amd_iommu *iommu)
716 {
717 free_pages((unsigned long)iommu->cmd_buf, get_order(CMD_BUFFER_SIZE));
718 }
719
720 static void *__init iommu_alloc_4k_pages(struct amd_iommu *iommu,
721 gfp_t gfp, size_t size)
722 {
723 int order = get_order(size);
724 void *buf = (void *)__get_free_pages(gfp, order);
725
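/*
 * On SNP-capable IOMMUs the buffer must be backed by 4K mappings;
 * set_memory_4k() splits any large kernel mapping covering it, and the
 * allocation is dropped if that fails.
 */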
726 if (buf &&
727 iommu_feature(iommu, FEATURE_SNP) &&
728 set_memory_4k((unsigned long)buf, (1 << order))) {
729 free_pages((unsigned long)buf, order);
730 buf = NULL;
731 }
732
733 return buf;
734 }
735
736 /* allocates the memory where the IOMMU will log its events to */
737 static int __init alloc_event_buffer(struct amd_iommu *iommu)
738 {
739 iommu->evt_buf = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO,
740 EVT_BUFFER_SIZE);
741
742 return iommu->evt_buf ? 0 : -ENOMEM;
743 }
744
745 static void iommu_enable_event_buffer(struct amd_iommu *iommu)
746 {
747 u64 entry;
748
749 BUG_ON(iommu->evt_buf == NULL);
750
751 entry = iommu_virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK;
752
753 memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET,
754 &entry, sizeof(entry));
755
756 /* set head and tail to zero manually */
757 writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
758 writel(0x00, iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);
759
760 iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
761 }
762
763 /*
764 * This function disables the event log buffer
765 */
766 static void iommu_disable_event_buffer(struct amd_iommu *iommu)
767 {
768 iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);
769 }
770
771 static void __init free_event_buffer(struct amd_iommu *iommu)
772 {
773 free_pages((unsigned long)iommu->evt_buf, get_order(EVT_BUFFER_SIZE));
774 }
775
776 /* allocates the memory where the IOMMU will log peripheral page requests to */
777 static int __init alloc_ppr_log(struct amd_iommu *iommu)
778 {
779 iommu->ppr_log = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO,
780 PPR_LOG_SIZE);
781
782 return iommu->ppr_log ? 0 : -ENOMEM;
783 }
784
785 static void iommu_enable_ppr_log(struct amd_iommu *iommu)
786 {
787 u64 entry;
788
789 if (iommu->ppr_log == NULL)
790 return;
791
792 entry = iommu_virt_to_phys(iommu->ppr_log) | PPR_LOG_SIZE_512;
793
794 memcpy_toio(iommu->mmio_base + MMIO_PPR_LOG_OFFSET,
795 &entry, sizeof(entry));
796
797 /* set head and tail to zero manually */
798 writel(0x00, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
799 writel(0x00, iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
800
801 iommu_feature_enable(iommu, CONTROL_PPRLOG_EN);
802 iommu_feature_enable(iommu, CONTROL_PPR_EN);
803 }
804
805 static void __init free_ppr_log(struct amd_iommu *iommu)
806 {
807 free_pages((unsigned long)iommu->ppr_log, get_order(PPR_LOG_SIZE));
808 }
809
810 static void free_ga_log(struct amd_iommu *iommu)
811 {
812 #ifdef CONFIG_IRQ_REMAP
813 free_pages((unsigned long)iommu->ga_log, get_order(GA_LOG_SIZE));
814 free_pages((unsigned long)iommu->ga_log_tail, get_order(8));
815 #endif
816 }
817
818 static int iommu_ga_log_enable(struct amd_iommu *iommu)
819 {
820 #ifdef CONFIG_IRQ_REMAP
821 u32 status, i;
822 u64 entry;
823
824 if (!iommu->ga_log)
825 return -EINVAL;
826
827 /* Check if already running */
828 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
829 if (WARN_ON(status & (MMIO_STATUS_GALOG_RUN_MASK)))
830 return 0;
831
832 entry = iommu_virt_to_phys(iommu->ga_log) | GA_LOG_SIZE_512;
833 memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_BASE_OFFSET,
834 &entry, sizeof(entry));
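/* Mask the tail address to the 52-bit address field and keep it 8-byte aligned. */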
835 entry = (iommu_virt_to_phys(iommu->ga_log_tail) &
836 (BIT_ULL(52)-1)) & ~7ULL;
837 memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_TAIL_OFFSET,
838 &entry, sizeof(entry));
839 writel(0x00, iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
840 writel(0x00, iommu->mmio_base + MMIO_GA_TAIL_OFFSET);
841
842
843 iommu_feature_enable(iommu, CONTROL_GAINT_EN);
844 iommu_feature_enable(iommu, CONTROL_GALOG_EN);
845
846 for (i = 0; i < LOOP_TIMEOUT; ++i) {
847 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
848 if (status & (MMIO_STATUS_GALOG_RUN_MASK))
849 break;
850 udelay(10);
851 }
852
853 if (WARN_ON(i >= LOOP_TIMEOUT))
854 return -EINVAL;
855 #endif /* CONFIG_IRQ_REMAP */
856 return 0;
857 }
858
859 static int iommu_init_ga_log(struct amd_iommu *iommu)
860 {
861 #ifdef CONFIG_IRQ_REMAP
862 if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
863 return 0;
864
865 iommu->ga_log = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
866 get_order(GA_LOG_SIZE));
867 if (!iommu->ga_log)
868 goto err_out;
869
870 iommu->ga_log_tail = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
871 get_order(8));
872 if (!iommu->ga_log_tail)
873 goto err_out;
874
875 return 0;
876 err_out:
877 free_ga_log(iommu);
878 return -EINVAL;
879 #else
880 return 0;
881 #endif /* CONFIG_IRQ_REMAP */
882 }
883
884 static int __init alloc_cwwb_sem(struct amd_iommu *iommu)
885 {
886 iommu->cmd_sem = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO, 1);
887
888 return iommu->cmd_sem ? 0 : -ENOMEM;
889 }
890
891 static void __init free_cwwb_sem(struct amd_iommu *iommu)
892 {
893 if (iommu->cmd_sem)
894 free_page((unsigned long)iommu->cmd_sem);
895 }
896
897 static void iommu_enable_xt(struct amd_iommu *iommu)
898 {
899 #ifdef CONFIG_IRQ_REMAP
900 /*
901 * XT mode (32-bit APIC destination ID) requires
902 * GA mode (128-bit IRTE support) as a prerequisite.
903 */
904 if (AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir) &&
905 amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
906 iommu_feature_enable(iommu, CONTROL_XT_EN);
907 #endif /* CONFIG_IRQ_REMAP */
908 }
909
910 static void iommu_enable_gt(struct amd_iommu *iommu)
911 {
912 if (!iommu_feature(iommu, FEATURE_GT))
913 return;
914
915 iommu_feature_enable(iommu, CONTROL_GT_EN);
916 }
917
918 /* sets a specific bit in the device table entry. */
919 static void set_dev_entry_bit(u16 devid, u8 bit)
920 {
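/*
 * A device table entry spans four 64-bit words; split the bit number into
 * a word index and a bit offset within that word.
 */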
921 int i = (bit >> 6) & 0x03;
922 int _bit = bit & 0x3f;
923
924 amd_iommu_dev_table[devid].data[i] |= (1UL << _bit);
925 }
926
927 static int get_dev_entry_bit(u16 devid, u8 bit)
928 {
929 int i = (bit >> 6) & 0x03;
930 int _bit = bit & 0x3f;
931
932 return (amd_iommu_dev_table[devid].data[i] & (1UL << _bit)) >> _bit;
933 }
934
935
936 static bool copy_device_table(void)
937 {
938 u64 int_ctl, int_tab_len, entry = 0, last_entry = 0;
939 struct dev_table_entry *old_devtb = NULL;
940 u32 lo, hi, devid, old_devtb_size;
941 phys_addr_t old_devtb_phys;
942 struct amd_iommu *iommu;
943 u16 dom_id, dte_v, irq_v;
944 gfp_t gfp_flag;
945 u64 tmp;
946
947 if (!amd_iommu_pre_enabled)
948 return false;
949
950 pr_warn("Translation is already enabled - trying to copy translation structures\n");
951 for_each_iommu(iommu) {
952 /* All IOMMUs should use the same device table with the same size */
953 lo = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET);
954 hi = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET + 4);
955 entry = (((u64) hi) << 32) + lo;
956 if (last_entry && last_entry != entry) {
957 pr_err("IOMMU:%d should use the same dev table as others!\n",
958 iommu->index);
959 return false;
960 }
961 last_entry = entry;
962
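/*
 * The low bits of the device table base register encode the table size
 * as (number of 4K pages - 1), mirroring iommu_set_device_table().
 */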
963 old_devtb_size = ((entry & ~PAGE_MASK) + 1) << 12;
964 if (old_devtb_size != dev_table_size) {
965 pr_err("The device table size of IOMMU:%d is not expected!\n",
966 iommu->index);
967 return false;
968 }
969 }
970
971 /*
972 * When SME is enabled in the first kernel, the entry includes the
973 * memory encryption mask(sme_me_mask), we must remove the memory
974 * encryption mask to obtain the true physical address in kdump kernel.
975 */
976 old_devtb_phys = __sme_clr(entry) & PAGE_MASK;
977
978 if (old_devtb_phys >= 0x100000000ULL) {
979 pr_err("The address of old device table is above 4G, not trustworthy!\n");
980 return false;
981 }
982 old_devtb = (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT) && is_kdump_kernel())
983 ? (__force void *)ioremap_encrypted(old_devtb_phys,
984 dev_table_size)
985 : memremap(old_devtb_phys, dev_table_size, MEMREMAP_WB);
986
987 if (!old_devtb)
988 return false;
989
990 gfp_flag = GFP_KERNEL | __GFP_ZERO | GFP_DMA32;
991 old_dev_tbl_cpy = (void *)__get_free_pages(gfp_flag,
992 get_order(dev_table_size));
993 if (old_dev_tbl_cpy == NULL) {
994 pr_err("Failed to allocate memory for copying old device table!\n");
995 memunmap(old_devtb);
996 return false;
997 }
998
999 for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
1000 old_dev_tbl_cpy[devid] = old_devtb[devid];
1001 dom_id = old_devtb[devid].data[1] & DEV_DOMID_MASK;
1002 dte_v = old_devtb[devid].data[0] & DTE_FLAG_V;
1003
1004 if (dte_v && dom_id) {
1005 old_dev_tbl_cpy[devid].data[0] = old_devtb[devid].data[0];
1006 old_dev_tbl_cpy[devid].data[1] = old_devtb[devid].data[1];
1007 __set_bit(dom_id, amd_iommu_pd_alloc_bitmap);
1008 /* If gcr3 table existed, mask it out */
1009 if (old_devtb[devid].data[0] & DTE_FLAG_GV) {
1010 tmp = DTE_GCR3_VAL_B(~0ULL) << DTE_GCR3_SHIFT_B;
1011 tmp |= DTE_GCR3_VAL_C(~0ULL) << DTE_GCR3_SHIFT_C;
1012 old_dev_tbl_cpy[devid].data[1] &= ~tmp;
1013 tmp = DTE_GCR3_VAL_A(~0ULL) << DTE_GCR3_SHIFT_A;
1014 tmp |= DTE_FLAG_GV;
1015 old_dev_tbl_cpy[devid].data[0] &= ~tmp;
1016 }
1017 }
1018
1019 irq_v = old_devtb[devid].data[2] & DTE_IRQ_REMAP_ENABLE;
1020 int_ctl = old_devtb[devid].data[2] & DTE_IRQ_REMAP_INTCTL_MASK;
1021 int_tab_len = old_devtb[devid].data[2] & DTE_INTTABLEN_MASK;
1022 if (irq_v && (int_ctl || int_tab_len)) {
1023 if ((int_ctl != DTE_IRQ_REMAP_INTCTL) ||
1024 (int_tab_len != DTE_INTTABLEN)) {
1025 pr_err("Wrong old irq remapping flag: %#x\n", devid);
1026 memunmap(old_devtb);
1027 return false;
1028 }
1029
1030 old_dev_tbl_cpy[devid].data[2] = old_devtb[devid].data[2];
1031 }
1032 }
1033 memunmap(old_devtb);
1034
1035 return true;
1036 }
1037
1038 void amd_iommu_apply_erratum_63(u16 devid)
1039 {
1040 int sysmgt;
1041
1042 sysmgt = get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1) |
1043 (get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2) << 1);
1044
1045 if (sysmgt == 0x01)
1046 set_dev_entry_bit(devid, DEV_ENTRY_IW);
1047 }
1048
1049 /* Writes the specific IOMMU for a device into the rlookup table */
1050 static void __init set_iommu_for_device(struct amd_iommu *iommu, u16 devid)
1051 {
1052 amd_iommu_rlookup_table[devid] = iommu;
1053 }
1054
1055 /*
1056 * This function takes the device specific flags read from the ACPI
1057 * table and sets up the device table entry with that information
1058 */
1059 static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
1060 u16 devid, u32 flags, u32 ext_flags)
1061 {
1062 if (flags & ACPI_DEVFLAG_INITPASS)
1063 set_dev_entry_bit(devid, DEV_ENTRY_INIT_PASS);
1064 if (flags & ACPI_DEVFLAG_EXTINT)
1065 set_dev_entry_bit(devid, DEV_ENTRY_EINT_PASS);
1066 if (flags & ACPI_DEVFLAG_NMI)
1067 set_dev_entry_bit(devid, DEV_ENTRY_NMI_PASS);
1068 if (flags & ACPI_DEVFLAG_SYSMGT1)
1069 set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1);
1070 if (flags & ACPI_DEVFLAG_SYSMGT2)
1071 set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2);
1072 if (flags & ACPI_DEVFLAG_LINT0)
1073 set_dev_entry_bit(devid, DEV_ENTRY_LINT0_PASS);
1074 if (flags & ACPI_DEVFLAG_LINT1)
1075 set_dev_entry_bit(devid, DEV_ENTRY_LINT1_PASS);
1076
1077 amd_iommu_apply_erratum_63(devid);
1078
1079 set_iommu_for_device(iommu, devid);
1080 }
1081
1082 int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line)
1083 {
1084 struct devid_map *entry;
1085 struct list_head *list;
1086
1087 if (type == IVHD_SPECIAL_IOAPIC)
1088 list = &ioapic_map;
1089 else if (type == IVHD_SPECIAL_HPET)
1090 list = &hpet_map;
1091 else
1092 return -EINVAL;
1093
1094 list_for_each_entry(entry, list, list) {
1095 if (!(entry->id == id && entry->cmd_line))
1096 continue;
1097
1098 pr_info("Command-line override present for %s id %d - ignoring\n",
1099 type == IVHD_SPECIAL_IOAPIC ? "IOAPIC" : "HPET", id);
1100
1101 *devid = entry->devid;
1102
1103 return 0;
1104 }
1105
1106 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
1107 if (!entry)
1108 return -ENOMEM;
1109
1110 entry->id = id;
1111 entry->devid = *devid;
1112 entry->cmd_line = cmd_line;
1113
1114 list_add_tail(&entry->list, list);
1115
1116 return 0;
1117 }
1118
1119 static int __init add_acpi_hid_device(u8 *hid, u8 *uid, u16 *devid,
1120 bool cmd_line)
1121 {
1122 struct acpihid_map_entry *entry;
1123 struct list_head *list = &acpihid_map;
1124
1125 list_for_each_entry(entry, list, list) {
1126 if (strcmp(entry->hid, hid) ||
1127 (*uid && *entry->uid && strcmp(entry->uid, uid)) ||
1128 !entry->cmd_line)
1129 continue;
1130
1131 pr_info("Command-line override for hid:%s uid:%s\n",
1132 hid, uid);
1133 *devid = entry->devid;
1134 return 0;
1135 }
1136
1137 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
1138 if (!entry)
1139 return -ENOMEM;
1140
1141 memcpy(entry->uid, uid, strlen(uid));
1142 memcpy(entry->hid, hid, strlen(hid));
1143 entry->devid = *devid;
1144 entry->cmd_line = cmd_line;
1145 entry->root_devid = (entry->devid & (~0x7));
1146
1147 pr_info("%s, add hid:%s, uid:%s, rdevid:%d\n",
1148 entry->cmd_line ? "cmd" : "ivrs",
1149 entry->hid, entry->uid, entry->root_devid);
1150
1151 list_add_tail(&entry->list, list);
1152 return 0;
1153 }
1154
1155 static int __init add_early_maps(void)
1156 {
1157 int i, ret;
1158
1159 for (i = 0; i < early_ioapic_map_size; ++i) {
1160 ret = add_special_device(IVHD_SPECIAL_IOAPIC,
1161 early_ioapic_map[i].id,
1162 &early_ioapic_map[i].devid,
1163 early_ioapic_map[i].cmd_line);
1164 if (ret)
1165 return ret;
1166 }
1167
1168 for (i = 0; i < early_hpet_map_size; ++i) {
1169 ret = add_special_device(IVHD_SPECIAL_HPET,
1170 early_hpet_map[i].id,
1171 &early_hpet_map[i].devid,
1172 early_hpet_map[i].cmd_line);
1173 if (ret)
1174 return ret;
1175 }
1176
1177 for (i = 0; i < early_acpihid_map_size; ++i) {
1178 ret = add_acpi_hid_device(early_acpihid_map[i].hid,
1179 early_acpihid_map[i].uid,
1180 &early_acpihid_map[i].devid,
1181 early_acpihid_map[i].cmd_line);
1182 if (ret)
1183 return ret;
1184 }
1185
1186 return 0;
1187 }
1188
1189 /*
1190 * Takes a pointer to an AMD IOMMU entry in the ACPI table and
1191 * initializes the hardware and our data structures with it.
1192 */
1193 static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
1194 struct ivhd_header *h)
1195 {
1196 u8 *p = (u8 *)h;
1197 u8 *end = p, flags = 0;
1198 u16 devid = 0, devid_start = 0, devid_to = 0;
1199 u32 dev_i, ext_flags = 0;
1200 bool alias = false;
1201 struct ivhd_entry *e;
1202 u32 ivhd_size;
1203 int ret;
1204
1205
1206 ret = add_early_maps();
1207 if (ret)
1208 return ret;
1209
1210 amd_iommu_apply_ivrs_quirks();
1211
1212 /*
1213 * First save the recommended feature enable bits from ACPI
1214 */
1215 iommu->acpi_flags = h->flags;
1216
1217 /*
1218 * Done. Now parse the device entries
1219 */
1220 ivhd_size = get_ivhd_header_size(h);
1221 if (!ivhd_size) {
1222 pr_err("Unsupported IVHD type %#x\n", h->type);
1223 return -EINVAL;
1224 }
1225
1226 p += ivhd_size;
1227
1228 end += h->length;
1229
1230
1231 while (p < end) {
1232 e = (struct ivhd_entry *)p;
1233 switch (e->type) {
1234 case IVHD_DEV_ALL:
1235
1236 DUMP_printk(" DEV_ALL\t\t\tflags: %02x\n", e->flags);
1237
1238 for (dev_i = 0; dev_i <= amd_iommu_last_bdf; ++dev_i)
1239 set_dev_entry_from_acpi(iommu, dev_i, e->flags, 0);
1240 break;
1241 case IVHD_DEV_SELECT:
1242
1243 DUMP_printk(" DEV_SELECT\t\t\t devid: %02x:%02x.%x "
1244 "flags: %02x\n",
1245 PCI_BUS_NUM(e->devid),
1246 PCI_SLOT(e->devid),
1247 PCI_FUNC(e->devid),
1248 e->flags);
1249
1250 devid = e->devid;
1251 set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
1252 break;
1253 case IVHD_DEV_SELECT_RANGE_START:
1254
1255 DUMP_printk(" DEV_SELECT_RANGE_START\t "
1256 "devid: %02x:%02x.%x flags: %02x\n",
1257 PCI_BUS_NUM(e->devid),
1258 PCI_SLOT(e->devid),
1259 PCI_FUNC(e->devid),
1260 e->flags);
1261
1262 devid_start = e->devid;
1263 flags = e->flags;
1264 ext_flags = 0;
1265 alias = false;
1266 break;
1267 case IVHD_DEV_ALIAS:
1268
1269 DUMP_printk(" DEV_ALIAS\t\t\t devid: %02x:%02x.%x "
1270 "flags: %02x devid_to: %02x:%02x.%x\n",
1271 PCI_BUS_NUM(e->devid),
1272 PCI_SLOT(e->devid),
1273 PCI_FUNC(e->devid),
1274 e->flags,
1275 PCI_BUS_NUM(e->ext >> 8),
1276 PCI_SLOT(e->ext >> 8),
1277 PCI_FUNC(e->ext >> 8));
1278
1279 devid = e->devid;
1280 devid_to = e->ext >> 8;
1281 set_dev_entry_from_acpi(iommu, devid , e->flags, 0);
1282 set_dev_entry_from_acpi(iommu, devid_to, e->flags, 0);
1283 amd_iommu_alias_table[devid] = devid_to;
1284 break;
1285 case IVHD_DEV_ALIAS_RANGE:
1286
1287 DUMP_printk(" DEV_ALIAS_RANGE\t\t "
1288 "devid: %02x:%02x.%x flags: %02x "
1289 "devid_to: %02x:%02x.%x\n",
1290 PCI_BUS_NUM(e->devid),
1291 PCI_SLOT(e->devid),
1292 PCI_FUNC(e->devid),
1293 e->flags,
1294 PCI_BUS_NUM(e->ext >> 8),
1295 PCI_SLOT(e->ext >> 8),
1296 PCI_FUNC(e->ext >> 8));
1297
1298 devid_start = e->devid;
1299 flags = e->flags;
1300 devid_to = e->ext >> 8;
1301 ext_flags = 0;
1302 alias = true;
1303 break;
1304 case IVHD_DEV_EXT_SELECT:
1305
1306 DUMP_printk(" DEV_EXT_SELECT\t\t devid: %02x:%02x.%x "
1307 "flags: %02x ext: %08x\n",
1308 PCI_BUS_NUM(e->devid),
1309 PCI_SLOT(e->devid),
1310 PCI_FUNC(e->devid),
1311 e->flags, e->ext);
1312
1313 devid = e->devid;
1314 set_dev_entry_from_acpi(iommu, devid, e->flags,
1315 e->ext);
1316 break;
1317 case IVHD_DEV_EXT_SELECT_RANGE:
1318
1319 DUMP_printk(" DEV_EXT_SELECT_RANGE\t devid: "
1320 "%02x:%02x.%x flags: %02x ext: %08x\n",
1321 PCI_BUS_NUM(e->devid),
1322 PCI_SLOT(e->devid),
1323 PCI_FUNC(e->devid),
1324 e->flags, e->ext);
1325
1326 devid_start = e->devid;
1327 flags = e->flags;
1328 ext_flags = e->ext;
1329 alias = false;
1330 break;
1331 case IVHD_DEV_RANGE_END:
1332
1333 DUMP_printk(" DEV_RANGE_END\t\t devid: %02x:%02x.%x\n",
1334 PCI_BUS_NUM(e->devid),
1335 PCI_SLOT(e->devid),
1336 PCI_FUNC(e->devid));
1337
1338 devid = e->devid;
1339 for (dev_i = devid_start; dev_i <= devid; ++dev_i) {
1340 if (alias) {
1341 amd_iommu_alias_table[dev_i] = devid_to;
1342 set_dev_entry_from_acpi(iommu,
1343 devid_to, flags, ext_flags);
1344 }
1345 set_dev_entry_from_acpi(iommu, dev_i,
1346 flags, ext_flags);
1347 }
1348 break;
1349 case IVHD_DEV_SPECIAL: {
1350 u8 handle, type;
1351 const char *var;
1352 u16 devid;
1353 int ret;
1354
1355 handle = e->ext & 0xff;
1356 devid = (e->ext >> 8) & 0xffff;
1357 type = (e->ext >> 24) & 0xff;
1358
1359 if (type == IVHD_SPECIAL_IOAPIC)
1360 var = "IOAPIC";
1361 else if (type == IVHD_SPECIAL_HPET)
1362 var = "HPET";
1363 else
1364 var = "UNKNOWN";
1365
1366 DUMP_printk(" DEV_SPECIAL(%s[%d])\t\tdevid: %02x:%02x.%x\n",
1367 var, (int)handle,
1368 PCI_BUS_NUM(devid),
1369 PCI_SLOT(devid),
1370 PCI_FUNC(devid));
1371
1372 ret = add_special_device(type, handle, &devid, false);
1373 if (ret)
1374 return ret;
1375
1376 /*
1377 * add_special_device might update the devid in case a
1378 * command-line override is present. So call
1379 * set_dev_entry_from_acpi after add_special_device.
1380 */
1381 set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
1382
1383 break;
1384 }
1385 case IVHD_DEV_ACPI_HID: {
1386 u16 devid;
1387 u8 hid[ACPIHID_HID_LEN];
1388 u8 uid[ACPIHID_UID_LEN];
1389 int ret;
1390
1391 if (h->type != 0x40) {
1392 pr_err(FW_BUG "Invalid IVHD device type %#x\n",
1393 e->type);
1394 break;
1395 }
1396
1397 BUILD_BUG_ON(sizeof(e->ext_hid) != ACPIHID_HID_LEN - 1);
1398 memcpy(hid, &e->ext_hid, ACPIHID_HID_LEN - 1);
1399 hid[ACPIHID_HID_LEN - 1] = '\0';
1400
1401 if (!(*hid)) {
1402 pr_err(FW_BUG "Invalid HID.\n");
1403 break;
1404 }
1405
1406 uid[0] = '\0';
1407 switch (e->uidf) {
1408 case UID_NOT_PRESENT:
1409
1410 if (e->uidl != 0)
1411 pr_warn(FW_BUG "Invalid UID length.\n");
1412
1413 break;
1414 case UID_IS_INTEGER:
1415
1416 sprintf(uid, "%d", e->uid);
1417
1418 break;
1419 case UID_IS_CHARACTER:
1420
1421 memcpy(uid, &e->uid, e->uidl);
1422 uid[e->uidl] = '\0';
1423
1424 break;
1425 default:
1426 break;
1427 }
1428
1429 devid = e->devid;
1430 DUMP_printk(" DEV_ACPI_HID(%s[%s])\t\tdevid: %02x:%02x.%x\n",
1431 hid, uid,
1432 PCI_BUS_NUM(devid),
1433 PCI_SLOT(devid),
1434 PCI_FUNC(devid));
1435
1436 flags = e->flags;
1437
1438 ret = add_acpi_hid_device(hid, uid, &devid, false);
1439 if (ret)
1440 return ret;
1441
1442 /*
1443 * add_acpi_hid_device might update the devid in case a
1444 * command-line override is present. So call
1445 * set_dev_entry_from_acpi after add_acpi_hid_device.
1446 */
1447 set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
1448
1449 break;
1450 }
1451 default:
1452 break;
1453 }
1454
1455 p += ivhd_entry_length(p);
1456 }
1457
1458 return 0;
1459 }
1460
1461 static void __init free_iommu_one(struct amd_iommu *iommu)
1462 {
1463 free_cwwb_sem(iommu);
1464 free_command_buffer(iommu);
1465 free_event_buffer(iommu);
1466 free_ppr_log(iommu);
1467 free_ga_log(iommu);
1468 iommu_unmap_mmio_space(iommu);
1469 }
1470
1471 static void __init free_iommu_all(void)
1472 {
1473 struct amd_iommu *iommu, *next;
1474
1475 for_each_iommu_safe(iommu, next) {
1476 list_del(&iommu->list);
1477 free_iommu_one(iommu);
1478 kfree(iommu);
1479 }
1480 }
1481
1482 /*
1483 * Family15h Model 10h-1fh erratum 746 (IOMMU Logging May Stall Translations)
1484 * Workaround:
1485 * BIOS should disable L2B miscellaneous clock gating by setting
1486 * L2_L2B_CK_GATE_CONTROL[CKGateL2BMiscDisable](D0F2xF4_x90[2]) = 1b
1487 */
1488 static void amd_iommu_erratum_746_workaround(struct amd_iommu *iommu)
1489 {
1490 u32 value;
1491
1492 if ((boot_cpu_data.x86 != 0x15) ||
1493 (boot_cpu_data.x86_model < 0x10) ||
1494 (boot_cpu_data.x86_model > 0x1f))
1495 return;
1496
1497 pci_write_config_dword(iommu->dev, 0xf0, 0x90);
1498 pci_read_config_dword(iommu->dev, 0xf4, &value);
1499
1500 if (value & BIT(2))
1501 return;
1502
1503 /* Select NB indirect register 0x90 and enable writing */
1504 pci_write_config_dword(iommu->dev, 0xf0, 0x90 | (1 << 8));
1505
1506 pci_write_config_dword(iommu->dev, 0xf4, value | 0x4);
1507 pci_info(iommu->dev, "Applying erratum 746 workaround\n");
1508
1509 /* Clear the enable writing bit */
1510 pci_write_config_dword(iommu->dev, 0xf0, 0x90);
1511 }
1512
1513 /*
1514 * Family15h Model 30h-3fh (IOMMU Mishandles ATS Write Permission)
1515 * Workaround:
1516 * BIOS should enable ATS write permission check by setting
1517 * L2_DEBUG_3[AtsIgnoreIWDis](D0F2xF4_x47[0]) = 1b
1518 */
1519 static void amd_iommu_ats_write_check_workaround(struct amd_iommu *iommu)
1520 {
1521 u32 value;
1522
1523 if ((boot_cpu_data.x86 != 0x15) ||
1524 (boot_cpu_data.x86_model < 0x30) ||
1525 (boot_cpu_data.x86_model > 0x3f))
1526 return;
1527
1528 /* Test L2_DEBUG_3[AtsIgnoreIWDis] == 1 */
1529 value = iommu_read_l2(iommu, 0x47);
1530
1531 if (value & BIT(0))
1532 return;
1533
1534 /* Set L2_DEBUG_3[AtsIgnoreIWDis] = 1 */
1535 iommu_write_l2(iommu, 0x47, value | BIT(0));
1536
1537 pci_info(iommu->dev, "Applying ATS write check workaround\n");
1538 }
1539
1540 /*
1541 * This function glues the initialization functions for one IOMMU
1542 * together and also allocates the command buffer and programs the
1543 * hardware. It does NOT enable the IOMMU. This is done afterwards.
1544 */
1545 static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
1546 {
1547 int ret;
1548
1549 raw_spin_lock_init(&iommu->lock);
1550 iommu->cmd_sem_val = 0;
1551
1552 /* Add IOMMU to internal data structures */
1553 list_add_tail(&iommu->list, &amd_iommu_list);
1554 iommu->index = amd_iommus_present++;
1555
1556 if (unlikely(iommu->index >= MAX_IOMMUS)) {
1557 WARN(1, "System has more IOMMUs than supported by this driver\n");
1558 return -ENOSYS;
1559 }
1560
1561 /* Index is fine - add IOMMU to the array */
1562 amd_iommus[iommu->index] = iommu;
1563
1564 /*
1565 * Copy data from ACPI table entry to the iommu struct
1566 */
1567 iommu->devid = h->devid;
1568 iommu->cap_ptr = h->cap_ptr;
1569 iommu->pci_seg = h->pci_seg;
1570 iommu->mmio_phys = h->mmio_phys;
1571
1572 switch (h->type) {
1573 case 0x10:
1574 /* Check if IVHD EFR contains proper max banks/counters */
1575 if ((h->efr_attr != 0) &&
1576 ((h->efr_attr & (0xF << 13)) != 0) &&
1577 ((h->efr_attr & (0x3F << 17)) != 0))
1578 iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
1579 else
1580 iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
1581
1582 /*
1583 * Note: GA (128-bit IRTE) mode requires cmpxchg16b support.
1584 * GAM also requires GA mode. Therefore, we need to
1585 * check cmpxchg16b support before enabling it.
1586 */
1587 if (!boot_cpu_has(X86_FEATURE_CX16) ||
1588 ((h->efr_attr & (0x1 << IOMMU_FEAT_GASUP_SHIFT)) == 0))
1589 amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
1590 break;
1591 case 0x11:
1592 case 0x40:
1593 if (h->efr_reg & (1 << 9))
1594 iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
1595 else
1596 iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
1597
1598 /*
1599 * Note: GA (128-bit IRTE) mode requires cmpxchg16b support.
1600 * XT and GAM also require GA mode. Therefore, we need to
1601 * check cmpxchg16b support before enabling them.
1602 */
1603 if (!boot_cpu_has(X86_FEATURE_CX16) ||
1604 ((h->efr_reg & (0x1 << IOMMU_EFR_GASUP_SHIFT)) == 0)) {
1605 amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
1606 break;
1607 }
1608
1609 if (h->efr_reg & BIT(IOMMU_EFR_XTSUP_SHIFT))
1610 amd_iommu_xt_mode = IRQ_REMAP_X2APIC_MODE;
1611
1612 early_iommu_features_init(iommu, h);
1613
1614 break;
1615 default:
1616 return -EINVAL;
1617 }
1618
1619 iommu->mmio_base = iommu_map_mmio_space(iommu->mmio_phys,
1620 iommu->mmio_phys_end);
1621 if (!iommu->mmio_base)
1622 return -ENOMEM;
1623
1624 if (alloc_cwwb_sem(iommu))
1625 return -ENOMEM;
1626
1627 if (alloc_command_buffer(iommu))
1628 return -ENOMEM;
1629
1630 if (alloc_event_buffer(iommu))
1631 return -ENOMEM;
1632
1633 iommu->int_enabled = false;
1634
1635 init_translation_status(iommu);
1636 if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {
1637 iommu_disable(iommu);
1638 clear_translation_pre_enabled(iommu);
1639 pr_warn("Translation was enabled for IOMMU:%d but we are not in kdump mode\n",
1640 iommu->index);
1641 }
1642 if (amd_iommu_pre_enabled)
1643 amd_iommu_pre_enabled = translation_pre_enabled(iommu);
1644
1645 ret = init_iommu_from_acpi(iommu, h);
1646 if (ret)
1647 return ret;
1648
1649 if (amd_iommu_irq_remap) {
1650 ret = amd_iommu_create_irq_domain(iommu);
1651 if (ret)
1652 return ret;
1653 }
1654
1655 /*
1656 * Make sure IOMMU is not considered to translate itself. The IVRS
1657 * table tells us so, but this is a lie!
1658 */
1659 amd_iommu_rlookup_table[iommu->devid] = NULL;
1660
1661 return 0;
1662 }
1663
1664 /**
1665 * get_highest_supported_ivhd_type - Look up the appropriate IVHD type
1666 * @ivrs: Pointer to the IVRS header
1667 *
1668 * This function searches through all IVHD blocks and returns the highest supported IVHD type found.
1669 */
1670 static u8 get_highest_supported_ivhd_type(struct acpi_table_header *ivrs)
1671 {
1672 u8 *base = (u8 *)ivrs;
1673 struct ivhd_header *ivhd = (struct ivhd_header *)
1674 (base + IVRS_HEADER_LENGTH);
1675 u8 last_type = ivhd->type;
1676 u16 devid = ivhd->devid;
1677
1678 while (((u8 *)ivhd - base < ivrs->length) &&
1679 (ivhd->type <= ACPI_IVHD_TYPE_MAX_SUPPORTED)) {
1680 u8 *p = (u8 *) ivhd;
1681
1682 if (ivhd->devid == devid)
1683 last_type = ivhd->type;
1684 ivhd = (struct ivhd_header *)(p + ivhd->length);
1685 }
1686
1687 return last_type;
1688 }
1689
1690 /*
1691 * Iterates over all IOMMU entries in the ACPI table, allocates the
1692 * IOMMU structure and initializes it with init_iommu_one()
1693 */
1694 static int __init init_iommu_all(struct acpi_table_header *table)
1695 {
1696 u8 *p = (u8 *)table, *end = (u8 *)table;
1697 struct ivhd_header *h;
1698 struct amd_iommu *iommu;
1699 int ret;
1700
1701 end += table->length;
1702 p += IVRS_HEADER_LENGTH;
1703
1704 while (p < end) {
1705 h = (struct ivhd_header *)p;
1706 if (*p == amd_iommu_target_ivhd_type) {
1707
1708 DUMP_printk("device: %02x:%02x.%01x cap: %04x "
1709 "seg: %d flags: %01x info %04x\n",
1710 PCI_BUS_NUM(h->devid), PCI_SLOT(h->devid),
1711 PCI_FUNC(h->devid), h->cap_ptr,
1712 h->pci_seg, h->flags, h->info);
1713 DUMP_printk(" mmio-addr: %016llx\n",
1714 h->mmio_phys);
1715
1716 iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL);
1717 if (iommu == NULL)
1718 return -ENOMEM;
1719
1720 ret = init_iommu_one(iommu, h);
1721 if (ret)
1722 return ret;
1723 }
1724 p += h->length;
1725
1726 }
1727 WARN_ON(p != end);
1728
1729 return 0;
1730 }
1731
1732 static void init_iommu_perf_ctr(struct amd_iommu *iommu)
1733 {
1734 u64 val;
1735 struct pci_dev *pdev = iommu->dev;
1736
1737 if (!iommu_feature(iommu, FEATURE_PC))
1738 return;
1739
1740 amd_iommu_pc_present = true;
1741
1742 pci_info(pdev, "IOMMU performance counters supported\n");
1743
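/*
 * The counter configuration register reports the number of counter banks
 * (bits 17:12) and counters per bank (bits 10:7).
 */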
1744 val = readl(iommu->mmio_base + MMIO_CNTR_CONF_OFFSET);
1745 iommu->max_banks = (u8) ((val >> 12) & 0x3f);
1746 iommu->max_counters = (u8) ((val >> 7) & 0xf);
1747
1748 return;
1749 }
1750
1751 static ssize_t amd_iommu_show_cap(struct device *dev,
1752 struct device_attribute *attr,
1753 char *buf)
1754 {
1755 struct amd_iommu *iommu = dev_to_amd_iommu(dev);
1756 return sprintf(buf, "%x\n", iommu->cap);
1757 }
1758 static DEVICE_ATTR(cap, S_IRUGO, amd_iommu_show_cap, NULL);
1759
1760 static ssize_t amd_iommu_show_features(struct device *dev,
1761 struct device_attribute *attr,
1762 char *buf)
1763 {
1764 struct amd_iommu *iommu = dev_to_amd_iommu(dev);
1765 return sprintf(buf, "%llx\n", iommu->features);
1766 }
1767 static DEVICE_ATTR(features, S_IRUGO, amd_iommu_show_features, NULL);
1768
1769 static struct attribute *amd_iommu_attrs[] = {
1770 &dev_attr_cap.attr,
1771 &dev_attr_features.attr,
1772 NULL,
1773 };
1774
1775 static struct attribute_group amd_iommu_group = {
1776 .name = "amd-iommu",
1777 .attrs = amd_iommu_attrs,
1778 };
1779
1780 static const struct attribute_group *amd_iommu_groups[] = {
1781 &amd_iommu_group,
1782 NULL,
1783 };
1784
1785 /*
1786 * Note: IVHD types 0x11 and 0x40 also contain an exact copy
1787 * of the IOMMU Extended Feature Register [MMIO Offset 0030h].
1788 * Default to EFR in IVHD since it is available sooner (i.e. before PCI init).
1789 */
1790 static void __init late_iommu_features_init(struct amd_iommu *iommu)
1791 {
1792 u64 features;
1793
1794 if (!(iommu->cap & (1 << IOMMU_CAP_EFR)))
1795 return;
1796
1797 /* read extended feature bits */
1798 features = readq(iommu->mmio_base + MMIO_EXT_FEATURES);
1799
1800 if (!iommu->features) {
1801 iommu->features = features;
1802 return;
1803 }
1804
1805 /*
1806 * Sanity check and warn if EFR values from
1807 * IVHD and MMIO conflict.
1808 */
1809 if (features != iommu->features)
1810 pr_warn(FW_WARN "EFR mismatch. Use IVHD EFR (%#llx : %#llx).\n",
1811 features, iommu->features);
1812 }
1813
1814 static int __init iommu_init_pci(struct amd_iommu *iommu)
1815 {
1816 int cap_ptr = iommu->cap_ptr;
1817 int ret;
1818
1819 iommu->dev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(iommu->devid),
1820 iommu->devid & 0xff);
1821 if (!iommu->dev)
1822 return -ENODEV;
1823
1824 /* Prevent binding other PCI device drivers to IOMMU devices */
1825 iommu->dev->match_driver = false;
1826
1827 pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
1828 &iommu->cap);
1829
1830 if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB)))
1831 amd_iommu_iotlb_sup = false;
1832
1833 late_iommu_features_init(iommu);
1834
1835 if (iommu_feature(iommu, FEATURE_GT)) {
1836 int glxval;
1837 u32 max_pasid;
1838 u64 pasmax;
1839
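/*
 * The EFR PASID field holds the supported PASID width minus one, so the
 * largest usable PASID value is 2^(pasmax + 1) - 1.
 */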
1840 pasmax = iommu->features & FEATURE_PASID_MASK;
1841 pasmax >>= FEATURE_PASID_SHIFT;
1842 max_pasid = (1 << (pasmax + 1)) - 1;
1843
1844 amd_iommu_max_pasid = min(amd_iommu_max_pasid, max_pasid);
1845
1846 BUG_ON(amd_iommu_max_pasid & ~PASID_MASK);
1847
1848 glxval = iommu->features & FEATURE_GLXVAL_MASK;
1849 glxval >>= FEATURE_GLXVAL_SHIFT;
1850
1851 if (amd_iommu_max_glx_val == -1)
1852 amd_iommu_max_glx_val = glxval;
1853 else
1854 amd_iommu_max_glx_val = min(amd_iommu_max_glx_val, glxval);
1855 }
1856
1857 if (iommu_feature(iommu, FEATURE_GT) &&
1858 iommu_feature(iommu, FEATURE_PPR)) {
1859 iommu->is_iommu_v2 = true;
1860 amd_iommu_v2_present = true;
1861 }
1862
1863 if (iommu_feature(iommu, FEATURE_PPR) && alloc_ppr_log(iommu))
1864 return -ENOMEM;
1865
1866 ret = iommu_init_ga_log(iommu);
1867 if (ret)
1868 return ret;
1869
1870 if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE)) {
1871 pr_info("Using strict mode due to virtualization\n");
1872 iommu_set_dma_strict();
1873 amd_iommu_np_cache = true;
1874 }
1875
1876 init_iommu_perf_ctr(iommu);
1877
1878 if (is_rd890_iommu(iommu->dev)) {
1879 int i, j;
1880
1881 iommu->root_pdev =
1882 pci_get_domain_bus_and_slot(0, iommu->dev->bus->number,
1883 PCI_DEVFN(0, 0));
1884
1885 /*
1886 * Some rd890 systems may not be fully reconfigured by the
1887 * BIOS, so it's necessary for us to store this information so
1888 * it can be reprogrammed on resume
1889 */
1890 pci_read_config_dword(iommu->dev, iommu->cap_ptr + 4,
1891 &iommu->stored_addr_lo);
1892 pci_read_config_dword(iommu->dev, iommu->cap_ptr + 8,
1893 &iommu->stored_addr_hi);
1894
1895 /* Low bit locks writes to configuration space */
1896 iommu->stored_addr_lo &= ~1;
1897
1898 for (i = 0; i < 6; i++)
1899 for (j = 0; j < 0x12; j++)
1900 iommu->stored_l1[i][j] = iommu_read_l1(iommu, i, j);
1901
1902 for (i = 0; i < 0x83; i++)
1903 iommu->stored_l2[i] = iommu_read_l2(iommu, i);
1904 }
1905
1906 amd_iommu_erratum_746_workaround(iommu);
1907 amd_iommu_ats_write_check_workaround(iommu);
1908
1909 iommu_device_sysfs_add(&iommu->iommu, &iommu->dev->dev,
1910 amd_iommu_groups, "ivhd%d", iommu->index);
1911 iommu_device_register(&iommu->iommu, &amd_iommu_ops, NULL);
1912
1913 return pci_enable_device(iommu->dev);
1914 }
1915
static void print_iommu_info(void)
1917 {
1918 static const char * const feat_str[] = {
1919 "PreF", "PPR", "X2APIC", "NX", "GT", "[5]",
1920 "IA", "GA", "HE", "PC"
1921 };
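	/*
	 * feat_str[i] names EFR bit i, so the loop below walks the low
	 * feature bits in order; "[5]" is just a placeholder string for an
	 * index that is not decoded by name here.
	 */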
1922 struct amd_iommu *iommu;
1923
1924 for_each_iommu(iommu) {
1925 struct pci_dev *pdev = iommu->dev;
1926 int i;
1927
1928 pci_info(pdev, "Found IOMMU cap 0x%x\n", iommu->cap_ptr);
1929
1930 if (iommu->cap & (1 << IOMMU_CAP_EFR)) {
1931 pr_info("Extended features (%#llx):", iommu->features);
1932
1933 for (i = 0; i < ARRAY_SIZE(feat_str); ++i) {
1934 if (iommu_feature(iommu, (1ULL << i)))
1935 pr_cont(" %s", feat_str[i]);
1936 }
1937
1938 if (iommu->features & FEATURE_GAM_VAPIC)
1939 pr_cont(" GA_vAPIC");
1940
1941 pr_cont("\n");
1942 }
1943 }
1944 if (irq_remapping_enabled) {
1945 pr_info("Interrupt remapping enabled\n");
1946 if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
1947 pr_info("Virtual APIC enabled\n");
1948 if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
1949 pr_info("X2APIC enabled\n");
1950 }
1951 }
1952
static int __init amd_iommu_init_pci(void)
1954 {
1955 struct amd_iommu *iommu;
1956 int ret;
1957
1958 for_each_iommu(iommu) {
1959 ret = iommu_init_pci(iommu);
1960 if (ret) {
1961 pr_err("IOMMU%d: Failed to initialize IOMMU Hardware (error=%d)!\n",
1962 iommu->index, ret);
1963 goto out;
1964 }
1965 /* Need to setup range after PCI init */
1966 iommu_set_cwwb_range(iommu);
1967 }
1968
1969 /*
1970 * Order is important here to make sure any unity map requirements are
1971 * fulfilled. The unity mappings are created and written to the device
1972 * table during the amd_iommu_init_api() call.
1973 *
1974 * After that we call init_device_table_dma() to make sure any
1975 * uninitialized DTE will block DMA, and in the end we flush the caches
1976 * of all IOMMUs to make sure the changes to the device table are
1977 * active.
1978 */
1979 ret = amd_iommu_init_api();
1980 if (ret) {
1981 pr_err("IOMMU: Failed to initialize IOMMU-API interface (error=%d)!\n",
1982 ret);
1983 goto out;
1984 }
1985
1986 init_device_table_dma();
1987
1988 for_each_iommu(iommu)
1989 iommu_flush_all_caches(iommu);
1990
1991 print_iommu_info();
1992
1993 out:
1994 return ret;
1995 }
1996
1997 /****************************************************************************
1998 *
1999 * The following functions initialize the MSI interrupts for all IOMMUs
2000 * in the system. It's a bit challenging because there could be multiple
2001 * IOMMUs per PCI BDF but we can call pci_enable_msi(x) only once per
2002 * pci_dev.
2003 *
2004 ****************************************************************************/
2005
static int iommu_setup_msi(struct amd_iommu *iommu)
2007 {
2008 int r;
2009
2010 r = pci_enable_msi(iommu->dev);
2011 if (r)
2012 return r;
2013
2014 r = request_threaded_irq(iommu->dev->irq,
2015 amd_iommu_int_handler,
2016 amd_iommu_int_thread,
2017 0, "AMD-Vi",
2018 iommu);
2019
2020 if (r) {
2021 pci_disable_msi(iommu->dev);
2022 return r;
2023 }
2024
2025 return 0;
2026 }
2027
2028 union intcapxt {
2029 u64 capxt;
2030 struct {
2031 u64 reserved_0 : 2,
2032 dest_mode_logical : 1,
2033 reserved_1 : 5,
2034 destid_0_23 : 24,
2035 vector : 8,
2036 reserved_2 : 16,
2037 destid_24_31 : 8;
2038 };
2039 } __attribute__ ((packed));
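/*
 * The union above mirrors the layout of the MMIO IntCapXT registers
 * written in intcapxt_unmask_irq(): for example, a destination APIC ID
 * of 0x12345678 is split into destid_0_23 = 0x345678 and
 * destid_24_31 = 0x12, with the vector stored in bits 32-39.
 */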
2040
2041
2042 static struct irq_chip intcapxt_controller;
2043
static int intcapxt_irqdomain_activate(struct irq_domain *domain,
				       struct irq_data *irqd, bool reserve)
2046 {
2047 return 0;
2048 }
2049
static void intcapxt_irqdomain_deactivate(struct irq_domain *domain,
					  struct irq_data *irqd)
2052 {
2053 }
2054
2055
static int intcapxt_irqdomain_alloc(struct irq_domain *domain, unsigned int virq,
				    unsigned int nr_irqs, void *arg)
2058 {
2059 struct irq_alloc_info *info = arg;
2060 int i, ret;
2061
2062 if (!info || info->type != X86_IRQ_ALLOC_TYPE_AMDVI)
2063 return -EINVAL;
2064
2065 ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
2066 if (ret < 0)
2067 return ret;
2068
2069 for (i = virq; i < virq + nr_irqs; i++) {
2070 struct irq_data *irqd = irq_domain_get_irq_data(domain, i);
2071
2072 irqd->chip = &intcapxt_controller;
2073 irqd->chip_data = info->data;
2074 __irq_set_handler(i, handle_edge_irq, 0, "edge");
2075 }
2076
2077 return ret;
2078 }
2079
static void intcapxt_irqdomain_free(struct irq_domain *domain, unsigned int virq,
				    unsigned int nr_irqs)
2082 {
2083 irq_domain_free_irqs_top(domain, virq, nr_irqs);
2084 }
2085
2086
static void intcapxt_unmask_irq(struct irq_data *irqd)
2088 {
2089 struct amd_iommu *iommu = irqd->chip_data;
2090 struct irq_cfg *cfg = irqd_cfg(irqd);
2091 union intcapxt xt;
2092
2093 xt.capxt = 0ULL;
2094 xt.dest_mode_logical = apic->dest_mode_logical;
2095 xt.vector = cfg->vector;
2096 xt.destid_0_23 = cfg->dest_apicid & GENMASK(23, 0);
2097 xt.destid_24_31 = cfg->dest_apicid >> 24;
2098
2099 /**
2100 * Current IOMMU implementation uses the same IRQ for all
2101 * 3 IOMMU interrupts.
2102 */
2103 writeq(xt.capxt, iommu->mmio_base + MMIO_INTCAPXT_EVT_OFFSET);
2104 writeq(xt.capxt, iommu->mmio_base + MMIO_INTCAPXT_PPR_OFFSET);
2105 writeq(xt.capxt, iommu->mmio_base + MMIO_INTCAPXT_GALOG_OFFSET);
2106 }
2107
static void intcapxt_mask_irq(struct irq_data *irqd)
2109 {
2110 struct amd_iommu *iommu = irqd->chip_data;
2111
2112 writeq(0, iommu->mmio_base + MMIO_INTCAPXT_EVT_OFFSET);
2113 writeq(0, iommu->mmio_base + MMIO_INTCAPXT_PPR_OFFSET);
2114 writeq(0, iommu->mmio_base + MMIO_INTCAPXT_GALOG_OFFSET);
2115 }
2116
2117
static int intcapxt_set_affinity(struct irq_data *irqd,
				 const struct cpumask *mask, bool force)
2120 {
2121 struct irq_data *parent = irqd->parent_data;
2122 int ret;
2123
2124 ret = parent->chip->irq_set_affinity(parent, mask, force);
2125 if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE)
2126 return ret;
2127 return 0;
2128 }
2129
static int intcapxt_set_wake(struct irq_data *irqd, unsigned int on)
2131 {
2132 return on ? -EOPNOTSUPP : 0;
2133 }
2134
2135 static struct irq_chip intcapxt_controller = {
2136 .name = "IOMMU-MSI",
2137 .irq_unmask = intcapxt_unmask_irq,
2138 .irq_mask = intcapxt_mask_irq,
2139 .irq_ack = irq_chip_ack_parent,
2140 .irq_retrigger = irq_chip_retrigger_hierarchy,
2141 .irq_set_affinity = intcapxt_set_affinity,
2142 .irq_set_wake = intcapxt_set_wake,
2143 .flags = IRQCHIP_MASK_ON_SUSPEND,
2144 };
2145
2146 static const struct irq_domain_ops intcapxt_domain_ops = {
2147 .alloc = intcapxt_irqdomain_alloc,
2148 .free = intcapxt_irqdomain_free,
2149 .activate = intcapxt_irqdomain_activate,
2150 .deactivate = intcapxt_irqdomain_deactivate,
2151 };
2152
2153
2154 static struct irq_domain *iommu_irqdomain;
2155
static struct irq_domain *iommu_get_irqdomain(void)
2157 {
2158 struct fwnode_handle *fn;
2159
2160 /* No need for locking here (yet) as the init is single-threaded */
2161 if (iommu_irqdomain)
2162 return iommu_irqdomain;
2163
2164 fn = irq_domain_alloc_named_fwnode("AMD-Vi-MSI");
2165 if (!fn)
2166 return NULL;
2167
2168 iommu_irqdomain = irq_domain_create_hierarchy(x86_vector_domain, 0, 0,
2169 fn, &intcapxt_domain_ops,
2170 NULL);
2171 if (!iommu_irqdomain)
2172 irq_domain_free_fwnode(fn);
2173
2174 return iommu_irqdomain;
2175 }
2176
static int iommu_setup_intcapxt(struct amd_iommu *iommu)
2178 {
2179 struct irq_domain *domain;
2180 struct irq_alloc_info info;
2181 int irq, ret;
2182
2183 domain = iommu_get_irqdomain();
2184 if (!domain)
2185 return -ENXIO;
2186
2187 init_irq_alloc_info(&info, NULL);
2188 info.type = X86_IRQ_ALLOC_TYPE_AMDVI;
2189 info.data = iommu;
2190
2191 irq = irq_domain_alloc_irqs(domain, 1, NUMA_NO_NODE, &info);
2192 if (irq < 0) {
2193 irq_domain_remove(domain);
2194 return irq;
2195 }
2196
2197 ret = request_threaded_irq(irq, amd_iommu_int_handler,
2198 amd_iommu_int_thread, 0, "AMD-Vi", iommu);
2199 if (ret) {
2200 irq_domain_free_irqs(irq, 1);
2201 irq_domain_remove(domain);
2202 return ret;
2203 }
2204
2205 return 0;
2206 }
2207
static int iommu_init_irq(struct amd_iommu *iommu)
2209 {
2210 int ret;
2211
2212 if (iommu->int_enabled)
2213 goto enable_faults;
2214
2215 if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
2216 ret = iommu_setup_intcapxt(iommu);
2217 else if (iommu->dev->msi_cap)
2218 ret = iommu_setup_msi(iommu);
2219 else
2220 ret = -ENODEV;
2221
2222 if (ret)
2223 return ret;
2224
2225 iommu->int_enabled = true;
2226 enable_faults:
2227
2228 if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
2229 iommu_feature_enable(iommu, CONTROL_INTCAPXT_EN);
2230
2231 iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);
2232
2233 if (iommu->ppr_log != NULL)
2234 iommu_feature_enable(iommu, CONTROL_PPRINT_EN);
2235
2236 iommu_ga_log_enable(iommu);
2237
2238 return 0;
2239 }
2240
2241 /****************************************************************************
2242 *
2243 * The next functions belong to the third pass of parsing the ACPI
2244 * table. In this last pass the memory mapping requirements are
2245 * gathered (like exclusion and unity mapping ranges).
2246 *
2247 ****************************************************************************/
2248
static void __init free_unity_maps(void)
2250 {
2251 struct unity_map_entry *entry, *next;
2252
2253 list_for_each_entry_safe(entry, next, &amd_iommu_unity_map, list) {
2254 list_del(&entry->list);
2255 kfree(entry);
2256 }
2257 }
2258
2259 /* called for unity map ACPI definition */
static int __init init_unity_map_range(struct ivmd_header *m)
2261 {
2262 struct unity_map_entry *e = NULL;
2263 char *s;
2264
2265 e = kzalloc(sizeof(*e), GFP_KERNEL);
2266 if (e == NULL)
2267 return -ENOMEM;
2268
2269 switch (m->type) {
2270 default:
2271 kfree(e);
2272 return 0;
2273 case ACPI_IVMD_TYPE:
		s = "IVMD_TYPE\t\t\t";
2275 e->devid_start = e->devid_end = m->devid;
2276 break;
2277 case ACPI_IVMD_TYPE_ALL:
2278 s = "IVMD_TYPE_ALL\t\t";
2279 e->devid_start = 0;
2280 e->devid_end = amd_iommu_last_bdf;
2281 break;
2282 case ACPI_IVMD_TYPE_RANGE:
2283 s = "IVMD_TYPE_RANGE\t\t";
2284 e->devid_start = m->devid;
2285 e->devid_end = m->aux;
2286 break;
2287 }
2288 e->address_start = PAGE_ALIGN(m->range_start);
2289 e->address_end = e->address_start + PAGE_ALIGN(m->range_length);
2290 e->prot = m->flags >> 1;
2291
	/*
	 * Treat per-device exclusion ranges as r/w unity-mapped regions
	 * because some buggy BIOSes overwrite the exclusion range
	 * (the exclusion_start and exclusion_length members) when multiple
	 * exclusion ranges (IVMD entries) are defined in the ACPI table.
	 */
2299 if (m->flags & IVMD_FLAG_EXCL_RANGE)
2300 e->prot = (IVMD_FLAG_IW | IVMD_FLAG_IR) >> 1;
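	/*
	 * The IVMD IR/IW flags sit one bit above the protection encoding
	 * used for unity mappings, hence the ">> 1" above: e.g.
	 * (IVMD_FLAG_IW | IVMD_FLAG_IR) >> 1 == (0x04 | 0x02) >> 1 == 0x3,
	 * i.e. a read/write mapping.
	 */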
2301
2302 DUMP_printk("%s devid_start: %02x:%02x.%x devid_end: %02x:%02x.%x"
2303 " range_start: %016llx range_end: %016llx flags: %x\n", s,
2304 PCI_BUS_NUM(e->devid_start), PCI_SLOT(e->devid_start),
2305 PCI_FUNC(e->devid_start), PCI_BUS_NUM(e->devid_end),
2306 PCI_SLOT(e->devid_end), PCI_FUNC(e->devid_end),
2307 e->address_start, e->address_end, m->flags);
2308
2309 list_add_tail(&e->list, &amd_iommu_unity_map);
2310
2311 return 0;
2312 }
2313
2314 /* iterates over all memory definitions we find in the ACPI table */
static int __init init_memory_definitions(struct acpi_table_header *table)
2316 {
2317 u8 *p = (u8 *)table, *end = (u8 *)table;
2318 struct ivmd_header *m;
2319
2320 end += table->length;
2321 p += IVRS_HEADER_LENGTH;
2322
2323 while (p < end) {
2324 m = (struct ivmd_header *)p;
2325 if (m->flags & (IVMD_FLAG_UNITY_MAP | IVMD_FLAG_EXCL_RANGE))
2326 init_unity_map_range(m);
2327
2328 p += m->length;
2329 }
2330
2331 return 0;
2332 }
2333
2334 /*
2335 * Init the device table to not allow DMA access for devices
2336 */
static void init_device_table_dma(void)
2338 {
2339 u32 devid;
2340
2341 for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
2342 set_dev_entry_bit(devid, DEV_ENTRY_VALID);
2343 set_dev_entry_bit(devid, DEV_ENTRY_TRANSLATION);
2344 }
2345 }
2346
static void __init uninit_device_table_dma(void)
2348 {
2349 u32 devid;
2350
2351 for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
2352 amd_iommu_dev_table[devid].data[0] = 0ULL;
2353 amd_iommu_dev_table[devid].data[1] = 0ULL;
2354 }
2355 }
2356
static void init_device_table(void)
2358 {
2359 u32 devid;
2360
2361 if (!amd_iommu_irq_remap)
2362 return;
2363
2364 for (devid = 0; devid <= amd_iommu_last_bdf; ++devid)
2365 set_dev_entry_bit(devid, DEV_ENTRY_IRQ_TBL_EN);
2366 }
2367
static void iommu_init_flags(struct amd_iommu *iommu)
2369 {
2370 iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ?
2371 iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
2372 iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);
2373
2374 iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ?
2375 iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
2376 iommu_feature_disable(iommu, CONTROL_PASSPW_EN);
2377
2378 iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
2379 iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
2380 iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);
2381
2382 iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ?
2383 iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
2384 iommu_feature_disable(iommu, CONTROL_ISOC_EN);
2385
2386 /*
2387 * make IOMMU memory accesses cache coherent
2388 */
2389 iommu_feature_enable(iommu, CONTROL_COHERENT_EN);
2390
2391 /* Set IOTLB invalidation timeout to 1s */
2392 iommu_set_inv_tlb_timeout(iommu, CTRL_INV_TO_1S);
2393 }
2394
static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
2396 {
2397 int i, j;
2398 u32 ioc_feature_control;
2399 struct pci_dev *pdev = iommu->root_pdev;
2400
2401 /* RD890 BIOSes may not have completely reconfigured the iommu */
2402 if (!is_rd890_iommu(iommu->dev) || !pdev)
2403 return;
2404
2405 /*
2406 * First, we need to ensure that the iommu is enabled. This is
2407 * controlled by a register in the northbridge
2408 */
2409
2410 /* Select Northbridge indirect register 0x75 and enable writing */
2411 pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7));
2412 pci_read_config_dword(pdev, 0x64, &ioc_feature_control);
2413
2414 /* Enable the iommu */
2415 if (!(ioc_feature_control & 0x1))
2416 pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1);
2417
2418 /* Restore the iommu BAR */
2419 pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
2420 iommu->stored_addr_lo);
2421 pci_write_config_dword(iommu->dev, iommu->cap_ptr + 8,
2422 iommu->stored_addr_hi);
2423
2424 /* Restore the l1 indirect regs for each of the 6 l1s */
2425 for (i = 0; i < 6; i++)
2426 for (j = 0; j < 0x12; j++)
2427 iommu_write_l1(iommu, i, j, iommu->stored_l1[i][j]);
2428
2429 /* Restore the l2 indirect regs */
2430 for (i = 0; i < 0x83; i++)
2431 iommu_write_l2(iommu, i, iommu->stored_l2[i]);
2432
2433 /* Lock PCI setup registers */
2434 pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
2435 iommu->stored_addr_lo | 1);
2436 }
2437
static void iommu_enable_ga(struct amd_iommu *iommu)
2439 {
2440 #ifdef CONFIG_IRQ_REMAP
2441 switch (amd_iommu_guest_ir) {
2442 case AMD_IOMMU_GUEST_IR_VAPIC:
2443 iommu_feature_enable(iommu, CONTROL_GAM_EN);
2444 fallthrough;
2445 case AMD_IOMMU_GUEST_IR_LEGACY_GA:
2446 iommu_feature_enable(iommu, CONTROL_GA_EN);
2447 iommu->irte_ops = &irte_128_ops;
2448 break;
2449 default:
2450 iommu->irte_ops = &irte_32_ops;
2451 break;
2452 }
2453 #endif
2454 }
2455
static void early_enable_iommu(struct amd_iommu *iommu)
2457 {
2458 iommu_disable(iommu);
2459 iommu_init_flags(iommu);
2460 iommu_set_device_table(iommu);
2461 iommu_enable_command_buffer(iommu);
2462 iommu_enable_event_buffer(iommu);
2463 iommu_set_exclusion_range(iommu);
2464 iommu_enable_ga(iommu);
2465 iommu_enable_xt(iommu);
2466 iommu_enable(iommu);
2467 iommu_flush_all_caches(iommu);
2468 }
2469
/*
 * This function finally enables all IOMMUs found in the system after
 * they have been initialized.
 *
 * Or, if running in a kdump kernel with all IOMMUs pre-enabled, try to
 * copy the old content of the device table entries. If that is not the
 * case, or the copy fails, just continue as a normal kernel would.
 */
static void early_enable_iommus(void)
2479 {
2480 struct amd_iommu *iommu;
2481
2482
2483 if (!copy_device_table()) {
		/*
		 * If we get here because copying the device table from the
		 * old kernel (with all IOMMUs enabled) failed, print an
		 * error message and try to free the allocated
		 * old_dev_tbl_cpy.
		 */
2489 if (amd_iommu_pre_enabled)
2490 pr_err("Failed to copy DEV table from previous kernel.\n");
2491 if (old_dev_tbl_cpy != NULL)
2492 free_pages((unsigned long)old_dev_tbl_cpy,
2493 get_order(dev_table_size));
2494
2495 for_each_iommu(iommu) {
2496 clear_translation_pre_enabled(iommu);
2497 early_enable_iommu(iommu);
2498 }
2499 } else {
2500 pr_info("Copied DEV table from previous kernel.\n");
2501 free_pages((unsigned long)amd_iommu_dev_table,
2502 get_order(dev_table_size));
2503 amd_iommu_dev_table = old_dev_tbl_cpy;
2504 for_each_iommu(iommu) {
2505 iommu_disable_command_buffer(iommu);
2506 iommu_disable_event_buffer(iommu);
2507 iommu_enable_command_buffer(iommu);
2508 iommu_enable_event_buffer(iommu);
2509 iommu_enable_ga(iommu);
2510 iommu_enable_xt(iommu);
2511 iommu_set_device_table(iommu);
2512 iommu_flush_all_caches(iommu);
2513 }
2514 }
2515
2516 #ifdef CONFIG_IRQ_REMAP
2517 /*
2518 * Note: We have already checked GASup from IVRS table.
2519 * Now, we need to make sure that GAMSup is set.
2520 */
2521 if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) &&
2522 !check_feature_on_all_iommus(FEATURE_GAM_VAPIC))
2523 amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
2524
2525 if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
2526 amd_iommu_irq_ops.capability |= (1 << IRQ_POSTING_CAP);
2527 #endif
2528 }
2529
static void enable_iommus_v2(void)
2531 {
2532 struct amd_iommu *iommu;
2533
2534 for_each_iommu(iommu) {
2535 iommu_enable_ppr_log(iommu);
2536 iommu_enable_gt(iommu);
2537 }
2538 }
2539
static void enable_iommus(void)
2541 {
2542 early_enable_iommus();
2543
2544 enable_iommus_v2();
2545 }
2546
static void disable_iommus(void)
2548 {
2549 struct amd_iommu *iommu;
2550
2551 for_each_iommu(iommu)
2552 iommu_disable(iommu);
2553
2554 #ifdef CONFIG_IRQ_REMAP
2555 if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
2556 amd_iommu_irq_ops.capability &= ~(1 << IRQ_POSTING_CAP);
2557 #endif
2558 }
2559
2560 /*
2561 * Suspend/Resume support
2562 * disable suspend until real resume implemented
2563 */
2564
static void amd_iommu_resume(void)
2566 {
2567 struct amd_iommu *iommu;
2568
2569 for_each_iommu(iommu)
2570 iommu_apply_resume_quirks(iommu);
2571
2572 /* re-load the hardware */
2573 enable_iommus();
2574
2575 amd_iommu_enable_interrupts();
2576 }
2577
static int amd_iommu_suspend(void)
2579 {
2580 /* disable IOMMUs to go out of the way for BIOS */
2581 disable_iommus();
2582
2583 return 0;
2584 }
2585
2586 static struct syscore_ops amd_iommu_syscore_ops = {
2587 .suspend = amd_iommu_suspend,
2588 .resume = amd_iommu_resume,
2589 };
2590
static void __init free_iommu_resources(void)
2592 {
2593 kmemleak_free(irq_lookup_table);
2594 free_pages((unsigned long)irq_lookup_table,
2595 get_order(rlookup_table_size));
2596 irq_lookup_table = NULL;
2597
2598 kmem_cache_destroy(amd_iommu_irq_cache);
2599 amd_iommu_irq_cache = NULL;
2600
2601 free_pages((unsigned long)amd_iommu_rlookup_table,
2602 get_order(rlookup_table_size));
2603 amd_iommu_rlookup_table = NULL;
2604
2605 free_pages((unsigned long)amd_iommu_alias_table,
2606 get_order(alias_table_size));
2607 amd_iommu_alias_table = NULL;
2608
2609 free_pages((unsigned long)amd_iommu_dev_table,
2610 get_order(dev_table_size));
2611 amd_iommu_dev_table = NULL;
2612
2613 free_iommu_all();
2614 }
2615
2616 /* SB IOAPIC is always on this device in AMD systems */
2617 #define IOAPIC_SB_DEVID ((0x00 << 8) | PCI_DEVFN(0x14, 0))
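/*
 * I.e. PCI device 00:14.0: bus 0x00 in bits 15-8 and
 * PCI_DEVFN(0x14, 0) = 0xa0 in the low byte.
 */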
2618
static bool __init check_ioapic_information(void)
2620 {
2621 const char *fw_bug = FW_BUG;
2622 bool ret, has_sb_ioapic;
2623 int idx;
2624
2625 has_sb_ioapic = false;
2626 ret = false;
2627
2628 /*
2629 * If we have map overrides on the kernel command line the
2630 * messages in this function might not describe firmware bugs
2631 * anymore - so be careful
2632 */
2633 if (cmdline_maps)
2634 fw_bug = "";
2635
2636 for (idx = 0; idx < nr_ioapics; idx++) {
2637 int devid, id = mpc_ioapic_id(idx);
2638
2639 devid = get_ioapic_devid(id);
2640 if (devid < 0) {
2641 pr_err("%s: IOAPIC[%d] not in IVRS table\n",
2642 fw_bug, id);
2643 ret = false;
2644 } else if (devid == IOAPIC_SB_DEVID) {
2645 has_sb_ioapic = true;
2646 ret = true;
2647 }
2648 }
2649
2650 if (!has_sb_ioapic) {
2651 /*
2652 * We expect the SB IOAPIC to be listed in the IVRS
2653 * table. The system timer is connected to the SB IOAPIC
2654 * and if we don't have it in the list the system will
2655 * panic at boot time. This situation usually happens
2656 * when the BIOS is buggy and provides us the wrong
2657 * device id for the IOAPIC in the system.
2658 */
2659 pr_err("%s: No southbridge IOAPIC found\n", fw_bug);
2660 }
2661
2662 if (!ret)
2663 pr_err("Disabling interrupt remapping\n");
2664
2665 return ret;
2666 }
2667
static void __init free_dma_resources(void)
2669 {
2670 free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
2671 get_order(MAX_DOMAIN_ID/8));
2672 amd_iommu_pd_alloc_bitmap = NULL;
2673
2674 free_unity_maps();
2675 }
2676
static void __init ivinfo_init(void *ivrs)
2678 {
2679 amd_iommu_ivinfo = *((u32 *)(ivrs + IOMMU_IVINFO_OFFSET));
2680 }
2681
2682 /*
2683 * This is the hardware init function for AMD IOMMU in the system.
2684 * This function is called either from amd_iommu_init or from the interrupt
2685 * remapping setup code.
2686 *
 * This function parses the ACPI table for the AMD IOMMU (IVRS)
 * four times:
 *
 * Pass 1) Discover the most comprehensive IVHD type to use.
 *
 * Pass 2) Find the highest PCI device id the driver has to handle.
 *         This information determines the size of the data structures
 *         that need to be allocated.
 *
 * Pass 3) Initialize the newly allocated data structures with the
 *         information in the ACPI table about the AMD IOMMUs available
 *         in the system. This pass also maps the PCI devices in the
 *         system to specific IOMMUs.
 *
 * Pass 4) After the basic data structures are allocated and
 *         initialized, update them with the memory remapping
 *         requirements parsed out of the ACPI table in this
 *         last pass.
2705 *
2706 * After everything is set up the IOMMUs are enabled and the necessary
2707 * hotplug and suspend notifiers are registered.
2708 */
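/*
 * In early_amd_iommu_init() below these passes roughly correspond to
 * get_highest_supported_ivhd_type(), find_last_devid_acpi(),
 * init_iommu_all() and init_memory_definitions(), in that order.
 */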
static int __init early_amd_iommu_init(void)
2710 {
2711 struct acpi_table_header *ivrs_base;
2712 int i, remap_cache_sz, ret;
2713 acpi_status status;
2714
2715 if (!amd_iommu_detected)
2716 return -ENODEV;
2717
2718 status = acpi_get_table("IVRS", 0, &ivrs_base);
2719 if (status == AE_NOT_FOUND)
2720 return -ENODEV;
2721 else if (ACPI_FAILURE(status)) {
2722 const char *err = acpi_format_exception(status);
2723 pr_err("IVRS table error: %s\n", err);
2724 return -EINVAL;
2725 }
2726
2727 /*
2728 * Validate checksum here so we don't need to do it when
2729 * we actually parse the table
2730 */
2731 ret = check_ivrs_checksum(ivrs_base);
2732 if (ret)
2733 goto out;
2734
2735 ivinfo_init(ivrs_base);
2736
2737 amd_iommu_target_ivhd_type = get_highest_supported_ivhd_type(ivrs_base);
2738 DUMP_printk("Using IVHD type %#x\n", amd_iommu_target_ivhd_type);
2739
	/*
	 * First parse the ACPI tables to find the largest Bus/Dev/Func
	 * we need to handle. Based on this information, the shared data
	 * structures for the IOMMUs in the system are allocated.
	 */
2745 ret = find_last_devid_acpi(ivrs_base);
2746 if (ret)
2747 goto out;
2748
2749 dev_table_size = tbl_size(DEV_TABLE_ENTRY_SIZE);
2750 alias_table_size = tbl_size(ALIAS_TABLE_ENTRY_SIZE);
2751 rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE);
2752
2753 /* Device table - directly used by all IOMMUs */
2754 ret = -ENOMEM;
2755 amd_iommu_dev_table = (void *)__get_free_pages(
2756 GFP_KERNEL | __GFP_ZERO | GFP_DMA32,
2757 get_order(dev_table_size));
2758 if (amd_iommu_dev_table == NULL)
2759 goto out;
2760
	/*
	 * Alias table - maps a PCI Bus/Dev/Func to the Bus/Dev/Func the
	 * IOMMU sees for that device.
	 */
2765 amd_iommu_alias_table = (void *)__get_free_pages(GFP_KERNEL,
2766 get_order(alias_table_size));
2767 if (amd_iommu_alias_table == NULL)
2768 goto out;
2769
2770 /* IOMMU rlookup table - find the IOMMU for a specific device */
2771 amd_iommu_rlookup_table = (void *)__get_free_pages(
2772 GFP_KERNEL | __GFP_ZERO,
2773 get_order(rlookup_table_size));
2774 if (amd_iommu_rlookup_table == NULL)
2775 goto out;
2776
2777 amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages(
2778 GFP_KERNEL | __GFP_ZERO,
2779 get_order(MAX_DOMAIN_ID/8));
2780 if (amd_iommu_pd_alloc_bitmap == NULL)
2781 goto out;
2782
	/*
	 * Let each alias entry point to itself.
	 */
2786 for (i = 0; i <= amd_iommu_last_bdf; ++i)
2787 amd_iommu_alias_table[i] = i;
2788
	/*
	 * Never allocate domain 0 because it is used as the non-allocated
	 * and error value placeholder.
	 */
2793 __set_bit(0, amd_iommu_pd_alloc_bitmap);
2794
	/*
	 * Now that the data structures are allocated and basically
	 * initialized, start the real ACPI table scan.
	 */
2799 ret = init_iommu_all(ivrs_base);
2800 if (ret)
2801 goto out;
2802
2803 /* Disable any previously enabled IOMMUs */
2804 if (!is_kdump_kernel() || amd_iommu_disabled)
2805 disable_iommus();
2806
2807 if (amd_iommu_irq_remap)
2808 amd_iommu_irq_remap = check_ioapic_information();
2809
2810 if (amd_iommu_irq_remap) {
2811 /*
2812 * Interrupt remapping enabled, create kmem_cache for the
2813 * remapping tables.
2814 */
2815 ret = -ENOMEM;
2816 if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
2817 remap_cache_sz = MAX_IRQS_PER_TABLE * sizeof(u32);
2818 else
2819 remap_cache_sz = MAX_IRQS_PER_TABLE * (sizeof(u64) * 2);
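		/*
		 * I.e. 4 bytes per IRTE in legacy mode versus 16 bytes per
		 * 128-bit IRTE for the GA modes, mirroring the
		 * irte_32_ops/irte_128_ops selection in iommu_enable_ga().
		 */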
2820 amd_iommu_irq_cache = kmem_cache_create("irq_remap_cache",
2821 remap_cache_sz,
2822 DTE_INTTAB_ALIGNMENT,
2823 0, NULL);
2824 if (!amd_iommu_irq_cache)
2825 goto out;
2826
2827 irq_lookup_table = (void *)__get_free_pages(
2828 GFP_KERNEL | __GFP_ZERO,
2829 get_order(rlookup_table_size));
2830 kmemleak_alloc(irq_lookup_table, rlookup_table_size,
2831 1, GFP_KERNEL);
2832 if (!irq_lookup_table)
2833 goto out;
2834 }
2835
2836 ret = init_memory_definitions(ivrs_base);
2837 if (ret)
2838 goto out;
2839
2840 /* init the device table */
2841 init_device_table();
2842
2843 out:
2844 /* Don't leak any ACPI memory */
2845 acpi_put_table(ivrs_base);
2846
2847 return ret;
2848 }
2849
static int amd_iommu_enable_interrupts(void)
2851 {
2852 struct amd_iommu *iommu;
2853 int ret = 0;
2854
2855 for_each_iommu(iommu) {
2856 ret = iommu_init_irq(iommu);
2857 if (ret)
2858 goto out;
2859 }
2860
2861 out:
2862 return ret;
2863 }
2864
static bool __init detect_ivrs(void)
2866 {
2867 struct acpi_table_header *ivrs_base;
2868 acpi_status status;
2869 int i;
2870
2871 status = acpi_get_table("IVRS", 0, &ivrs_base);
2872 if (status == AE_NOT_FOUND)
2873 return false;
2874 else if (ACPI_FAILURE(status)) {
2875 const char *err = acpi_format_exception(status);
2876 pr_err("IVRS table error: %s\n", err);
2877 return false;
2878 }
2879
2880 acpi_put_table(ivrs_base);
2881
2882 if (amd_iommu_force_enable)
2883 goto out;
2884
2885 /* Don't use IOMMU if there is Stoney Ridge graphics */
2886 for (i = 0; i < 32; i++) {
2887 u32 pci_id;
2888
2889 pci_id = read_pci_config(0, i, 0, 0);
2890 if ((pci_id & 0xffff) == 0x1002 && (pci_id >> 16) == 0x98e4) {
2891 pr_info("Disable IOMMU on Stoney Ridge\n");
2892 return false;
2893 }
2894 }
2895
2896 out:
2897 /* Make sure ACS will be enabled during PCI probe */
2898 pci_request_acs();
2899
2900 return true;
2901 }
2902
2903 /****************************************************************************
2904 *
2905 * AMD IOMMU Initialization State Machine
2906 *
2907 ****************************************************************************/
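/*
 * On the successful path the state machine below advances through
 * IOMMU_START_STATE -> IOMMU_IVRS_DETECTED -> IOMMU_ACPI_FINISHED ->
 * IOMMU_ENABLED -> IOMMU_PCI_INIT -> IOMMU_INTERRUPTS_EN ->
 * IOMMU_INITIALIZED; IOMMU_NOT_FOUND, IOMMU_CMDLINE_DISABLED and
 * IOMMU_INIT_ERROR are terminal error states.
 */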
2908
static int __init state_next(void)
2910 {
2911 int ret = 0;
2912
2913 switch (init_state) {
2914 case IOMMU_START_STATE:
2915 if (!detect_ivrs()) {
2916 init_state = IOMMU_NOT_FOUND;
2917 ret = -ENODEV;
2918 } else {
2919 init_state = IOMMU_IVRS_DETECTED;
2920 }
2921 break;
2922 case IOMMU_IVRS_DETECTED:
2923 if (amd_iommu_disabled) {
2924 init_state = IOMMU_CMDLINE_DISABLED;
2925 ret = -EINVAL;
2926 } else {
2927 ret = early_amd_iommu_init();
2928 init_state = ret ? IOMMU_INIT_ERROR : IOMMU_ACPI_FINISHED;
2929 }
2930 break;
2931 case IOMMU_ACPI_FINISHED:
2932 early_enable_iommus();
2933 x86_platform.iommu_shutdown = disable_iommus;
2934 init_state = IOMMU_ENABLED;
2935 break;
2936 case IOMMU_ENABLED:
2937 register_syscore_ops(&amd_iommu_syscore_ops);
2938 ret = amd_iommu_init_pci();
2939 init_state = ret ? IOMMU_INIT_ERROR : IOMMU_PCI_INIT;
2940 enable_iommus_v2();
2941 break;
2942 case IOMMU_PCI_INIT:
2943 ret = amd_iommu_enable_interrupts();
2944 init_state = ret ? IOMMU_INIT_ERROR : IOMMU_INTERRUPTS_EN;
2945 break;
2946 case IOMMU_INTERRUPTS_EN:
2947 init_state = IOMMU_INITIALIZED;
2948 break;
2949 case IOMMU_INITIALIZED:
2950 /* Nothing to do */
2951 break;
2952 case IOMMU_NOT_FOUND:
2953 case IOMMU_INIT_ERROR:
2954 case IOMMU_CMDLINE_DISABLED:
2955 /* Error states => do nothing */
2956 ret = -EINVAL;
2957 break;
2958 default:
2959 /* Unknown state */
2960 BUG();
2961 }
2962
2963 if (ret) {
2964 free_dma_resources();
2965 if (!irq_remapping_enabled) {
2966 disable_iommus();
2967 free_iommu_resources();
2968 } else {
2969 struct amd_iommu *iommu;
2970
2971 uninit_device_table_dma();
2972 for_each_iommu(iommu)
2973 iommu_flush_all_caches(iommu);
2974 }
2975 }
2976 return ret;
2977 }
2978
static int __init iommu_go_to_state(enum iommu_init_state state)
2980 {
2981 int ret = -EINVAL;
2982
2983 while (init_state != state) {
2984 if (init_state == IOMMU_NOT_FOUND ||
2985 init_state == IOMMU_INIT_ERROR ||
2986 init_state == IOMMU_CMDLINE_DISABLED)
2987 break;
2988 ret = state_next();
2989 }
2990
2991 return ret;
2992 }
2993
2994 #ifdef CONFIG_IRQ_REMAP
int __init amd_iommu_prepare(void)
2996 {
2997 int ret;
2998
2999 amd_iommu_irq_remap = true;
3000
3001 ret = iommu_go_to_state(IOMMU_ACPI_FINISHED);
3002 if (ret) {
3003 amd_iommu_irq_remap = false;
3004 return ret;
3005 }
3006
3007 return amd_iommu_irq_remap ? 0 : -ENODEV;
3008 }
3009
int __init amd_iommu_enable(void)
3011 {
3012 int ret;
3013
3014 ret = iommu_go_to_state(IOMMU_ENABLED);
3015 if (ret)
3016 return ret;
3017
3018 irq_remapping_enabled = 1;
3019 return amd_iommu_xt_mode;
3020 }
3021
void amd_iommu_disable(void)
3023 {
3024 amd_iommu_suspend();
3025 }
3026
int amd_iommu_reenable(int mode)
3028 {
3029 amd_iommu_resume();
3030
3031 return 0;
3032 }
3033
int __init amd_iommu_enable_faulting(void)
3035 {
3036 /* We enable MSI later when PCI is initialized */
3037 return 0;
3038 }
3039 #endif
3040
3041 /*
3042 * This is the core init function for AMD IOMMU hardware in the system.
3043 * This function is called from the generic x86 DMA layer initialization
3044 * code.
3045 */
static int __init amd_iommu_init(void)
3047 {
3048 struct amd_iommu *iommu;
3049 int ret;
3050
3051 ret = iommu_go_to_state(IOMMU_INITIALIZED);
3052 #ifdef CONFIG_GART_IOMMU
3053 if (ret && list_empty(&amd_iommu_list)) {
3054 /*
3055 * We failed to initialize the AMD IOMMU - try fallback
3056 * to GART if possible.
3057 */
3058 gart_iommu_init();
3059 }
3060 #endif
3061
3062 for_each_iommu(iommu)
3063 amd_iommu_debugfs_setup(iommu);
3064
3065 return ret;
3066 }
3067
static bool amd_iommu_sme_check(void)
3069 {
3070 if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT) ||
3071 (boot_cpu_data.x86 != 0x17))
3072 return true;
3073
3074 /* For Fam17h, a specific level of support is required */
3075 if (boot_cpu_data.microcode >= 0x08001205)
3076 return true;
3077
3078 if ((boot_cpu_data.microcode >= 0x08001126) &&
3079 (boot_cpu_data.microcode <= 0x080011ff))
3080 return true;
3081
3082 pr_notice("IOMMU not currently supported when SME is active\n");
3083
3084 return false;
3085 }
3086
3087 /****************************************************************************
3088 *
 * Early detect code. This code runs at IOMMU detection time in the DMA
 * layer. It just checks whether an IVRS ACPI table is present in order
 * to detect AMD IOMMUs.
3092 *
3093 ****************************************************************************/
int __init amd_iommu_detect(void)
3095 {
3096 int ret;
3097
3098 if (no_iommu || (iommu_detected && !gart_iommu_aperture))
3099 return -ENODEV;
3100
3101 if (!amd_iommu_sme_check())
3102 return -ENODEV;
3103
3104 ret = iommu_go_to_state(IOMMU_IVRS_DETECTED);
3105 if (ret)
3106 return ret;
3107
3108 amd_iommu_detected = true;
3109 iommu_detected = 1;
3110 x86_init.iommu.iommu_init = amd_iommu_init;
3111
3112 return 1;
3113 }
3114
3115 /****************************************************************************
3116 *
3117 * Parsing functions for the AMD IOMMU specific kernel command line
3118 * options.
3119 *
3120 ****************************************************************************/
3121
static int __init parse_amd_iommu_dump(char *str)
3123 {
3124 amd_iommu_dump = true;
3125
3126 return 1;
3127 }
3128
static int __init parse_amd_iommu_intr(char *str)
3130 {
3131 for (; *str; ++str) {
3132 if (strncmp(str, "legacy", 6) == 0) {
3133 amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
3134 break;
3135 }
3136 if (strncmp(str, "vapic", 5) == 0) {
3137 amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC;
3138 break;
3139 }
3140 }
3141 return 1;
3142 }
3143
static int __init parse_amd_iommu_options(char *str)
3145 {
3146 for (; *str; ++str) {
3147 if (strncmp(str, "fullflush", 9) == 0) {
3148 pr_warn("amd_iommu=fullflush deprecated; use iommu.strict=1 instead\n");
3149 iommu_set_dma_strict();
3150 }
3151 if (strncmp(str, "force_enable", 12) == 0)
3152 amd_iommu_force_enable = true;
3153 if (strncmp(str, "off", 3) == 0)
3154 amd_iommu_disabled = true;
3155 if (strncmp(str, "force_isolation", 15) == 0)
3156 amd_iommu_force_isolation = true;
3157 }
3158
3159 return 1;
3160 }
3161
static int __init parse_ivrs_ioapic(char *str)
3163 {
3164 unsigned int bus, dev, fn;
3165 int ret, id, i;
3166 u16 devid;
3167
3168 ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn);
3169
3170 if (ret != 4) {
3171 pr_err("Invalid command line: ivrs_ioapic%s\n", str);
3172 return 1;
3173 }
3174
3175 if (early_ioapic_map_size == EARLY_MAP_SIZE) {
3176 pr_err("Early IOAPIC map overflow - ignoring ivrs_ioapic%s\n",
3177 str);
3178 return 1;
3179 }
3180
3181 devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
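	/*
	 * For example, ivrs_ioapic[32]=00:14.0 yields
	 * devid = (0x00 << 8) | (0x14 << 3) | 0x0 = 0xa0.
	 */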
3182
3183 cmdline_maps = true;
3184 i = early_ioapic_map_size++;
3185 early_ioapic_map[i].id = id;
3186 early_ioapic_map[i].devid = devid;
3187 early_ioapic_map[i].cmd_line = true;
3188
3189 return 1;
3190 }
3191
static int __init parse_ivrs_hpet(char *str)
3193 {
3194 unsigned int bus, dev, fn;
3195 int ret, id, i;
3196 u16 devid;
3197
3198 ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn);
3199
3200 if (ret != 4) {
3201 pr_err("Invalid command line: ivrs_hpet%s\n", str);
3202 return 1;
3203 }
3204
3205 if (early_hpet_map_size == EARLY_MAP_SIZE) {
3206 pr_err("Early HPET map overflow - ignoring ivrs_hpet%s\n",
3207 str);
3208 return 1;
3209 }
3210
3211 devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
3212
3213 cmdline_maps = true;
3214 i = early_hpet_map_size++;
3215 early_hpet_map[i].id = id;
3216 early_hpet_map[i].devid = devid;
3217 early_hpet_map[i].cmd_line = true;
3218
3219 return 1;
3220 }
3221
static int __init parse_ivrs_acpihid(char *str)
3223 {
3224 u32 bus, dev, fn;
3225 char *hid, *uid, *p;
3226 char acpiid[ACPIHID_UID_LEN + ACPIHID_HID_LEN] = {0};
3227 int ret, i;
3228
3229 ret = sscanf(str, "[%x:%x.%x]=%s", &bus, &dev, &fn, acpiid);
3230 if (ret != 4) {
3231 pr_err("Invalid command line: ivrs_acpihid(%s)\n", str);
3232 return 1;
3233 }
3234
3235 p = acpiid;
3236 hid = strsep(&p, ":");
3237 uid = p;
3238
3239 if (!hid || !(*hid) || !uid) {
3240 pr_err("Invalid command line: hid or uid\n");
3241 return 1;
3242 }
3243
3244 i = early_acpihid_map_size++;
3245 memcpy(early_acpihid_map[i].hid, hid, strlen(hid));
3246 memcpy(early_acpihid_map[i].uid, uid, strlen(uid));
3247 early_acpihid_map[i].devid =
3248 ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
3249 early_acpihid_map[i].cmd_line = true;
3250
3251 return 1;
3252 }
3253
3254 __setup("amd_iommu_dump", parse_amd_iommu_dump);
3255 __setup("amd_iommu=", parse_amd_iommu_options);
3256 __setup("amd_iommu_intr=", parse_amd_iommu_intr);
3257 __setup("ivrs_ioapic", parse_ivrs_ioapic);
3258 __setup("ivrs_hpet", parse_ivrs_hpet);
3259 __setup("ivrs_acpihid", parse_ivrs_acpihid);
3260
bool amd_iommu_v2_supported(void)
3262 {
3263 return amd_iommu_v2_present;
3264 }
3265 EXPORT_SYMBOL(amd_iommu_v2_supported);
3266
struct amd_iommu *get_amd_iommu(unsigned int idx)
3268 {
3269 unsigned int i = 0;
3270 struct amd_iommu *iommu;
3271
3272 for_each_iommu(iommu)
3273 if (i++ == idx)
3274 return iommu;
3275 return NULL;
3276 }
3277
3278 /****************************************************************************
3279 *
 * IOMMU EFR Performance Counter support. This code allows access to the
 * IOMMU PC functionality.
3282 *
3283 ****************************************************************************/
3284
u8 amd_iommu_pc_get_max_banks(unsigned int idx)
3286 {
3287 struct amd_iommu *iommu = get_amd_iommu(idx);
3288
3289 if (iommu)
3290 return iommu->max_banks;
3291
3292 return 0;
3293 }
3294 EXPORT_SYMBOL(amd_iommu_pc_get_max_banks);
3295
bool amd_iommu_pc_supported(void)
3297 {
3298 return amd_iommu_pc_present;
3299 }
3300 EXPORT_SYMBOL(amd_iommu_pc_supported);
3301
u8 amd_iommu_pc_get_max_counters(unsigned int idx)
3303 {
3304 struct amd_iommu *iommu = get_amd_iommu(idx);
3305
3306 if (iommu)
3307 return iommu->max_counters;
3308
3309 return 0;
3310 }
3311 EXPORT_SYMBOL(amd_iommu_pc_get_max_counters);
3312
static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
				u8 fxn, u64 *value, bool is_write)
3315 {
3316 u32 offset;
3317 u32 max_offset_lim;
3318
3319 /* Make sure the IOMMU PC resource is available */
3320 if (!amd_iommu_pc_present)
3321 return -ENODEV;
3322
3323 /* Check for valid iommu and pc register indexing */
3324 if (WARN_ON(!iommu || (fxn > 0x28) || (fxn & 7)))
3325 return -ENODEV;
3326
3327 offset = (u32)(((0x40 | bank) << 12) | (cntr << 8) | fxn);
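	/*
	 * For example, bank 0, counter 1, fxn 0x00 gives
	 * offset = ((0x40 | 0) << 12) | (1 << 8) | 0x00 = 0x40100, i.e. an
	 * offset inside the counter bank region of the MMIO space. The
	 * counters themselves are 48 bits wide, hence the
	 * GENMASK_ULL(47, 0) masking below.
	 */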
3328
3329 /* Limit the offset to the hw defined mmio region aperture */
3330 max_offset_lim = (u32)(((0x40 | iommu->max_banks) << 12) |
3331 (iommu->max_counters << 8) | 0x28);
3332 if ((offset < MMIO_CNTR_REG_OFFSET) ||
3333 (offset > max_offset_lim))
3334 return -EINVAL;
3335
3336 if (is_write) {
3337 u64 val = *value & GENMASK_ULL(47, 0);
3338
3339 writel((u32)val, iommu->mmio_base + offset);
3340 writel((val >> 32), iommu->mmio_base + offset + 4);
3341 } else {
3342 *value = readl(iommu->mmio_base + offset + 4);
3343 *value <<= 32;
3344 *value |= readl(iommu->mmio_base + offset);
3345 *value &= GENMASK_ULL(47, 0);
3346 }
3347
3348 return 0;
3349 }
3350
int amd_iommu_pc_get_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value)
3352 {
3353 if (!iommu)
3354 return -EINVAL;
3355
3356 return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, false);
3357 }
3358
int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value)
3360 {
3361 if (!iommu)
3362 return -EINVAL;
3363
3364 return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, true);
3365 }
3366