/*
 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *         Leo Duran <leo.duran@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#ifndef _ASM_X86_AMD_IOMMU_TYPES_H
#define _ASM_X86_AMD_IOMMU_TYPES_H

#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/spinlock.h>

/*
 * Maximum number of IOMMUs supported
 */
#define MAX_IOMMUS	32

/*
 * some size calculation constants
 */
#define DEV_TABLE_ENTRY_SIZE		32
#define ALIAS_TABLE_ENTRY_SIZE		2
#define RLOOKUP_TABLE_ENTRY_SIZE	(sizeof(void *))

/* Length of the MMIO region for the AMD IOMMU */
#define MMIO_REGION_LENGTH	0x4000

/* Capability offsets used by the driver */
#define MMIO_CAP_HDR_OFFSET	0x00
#define MMIO_RANGE_OFFSET	0x0c
#define MMIO_MISC_OFFSET	0x10

/* Masks, shifts and macros to parse the device range capability */
#define MMIO_RANGE_LD_MASK	0xff000000
#define MMIO_RANGE_FD_MASK	0x00ff0000
#define MMIO_RANGE_BUS_MASK	0x0000ff00
#define MMIO_RANGE_LD_SHIFT	24
#define MMIO_RANGE_FD_SHIFT	16
#define MMIO_RANGE_BUS_SHIFT	8
#define MMIO_GET_LD(x)	(((x) & MMIO_RANGE_LD_MASK) >> MMIO_RANGE_LD_SHIFT)
#define MMIO_GET_FD(x)	(((x) & MMIO_RANGE_FD_MASK) >> MMIO_RANGE_FD_SHIFT)
#define MMIO_GET_BUS(x)	(((x) & MMIO_RANGE_BUS_MASK) >> MMIO_RANGE_BUS_SHIFT)
#define MMIO_MSI_NUM(x)	((x) & 0x1f)
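
/*
 * Illustrative sketch only (not a driver helper): unpacking the raw device
 * range capability value with the accessors above.  The function and
 * variable names here are made up for the example.
 */
static inline void __mmio_range_decode_example(u32 range, u8 *bus,
						u8 *first_dev, u8 *last_dev)
{
	*bus       = MMIO_GET_BUS(range);	/* bus number of the handled range */
	*first_dev = MMIO_GET_FD(range);	/* first devfn covered */
	*last_dev  = MMIO_GET_LD(range);	/* last devfn covered */
}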

/* Flag masks for the AMD IOMMU exclusion range */
#define MMIO_EXCL_ENABLE_MASK	0x01ULL
#define MMIO_EXCL_ALLOW_MASK	0x02ULL

/* Used offsets into the MMIO space */
#define MMIO_DEV_TABLE_OFFSET	0x0000
#define MMIO_CMD_BUF_OFFSET	0x0008
#define MMIO_EVT_BUF_OFFSET	0x0010
#define MMIO_CONTROL_OFFSET	0x0018
#define MMIO_EXCL_BASE_OFFSET	0x0020
#define MMIO_EXCL_LIMIT_OFFSET	0x0028
#define MMIO_EXT_FEATURES	0x0030
#define MMIO_PPR_LOG_OFFSET	0x0038
#define MMIO_CMD_HEAD_OFFSET	0x2000
#define MMIO_CMD_TAIL_OFFSET	0x2008
#define MMIO_EVT_HEAD_OFFSET	0x2010
#define MMIO_EVT_TAIL_OFFSET	0x2018
#define MMIO_STATUS_OFFSET	0x2020
#define MMIO_PPR_HEAD_OFFSET	0x2030
#define MMIO_PPR_TAIL_OFFSET	0x2038


/* Extended Feature Bits */
#define FEATURE_PREFETCH	(1ULL<<0)
#define FEATURE_PPR		(1ULL<<1)
#define FEATURE_X2APIC		(1ULL<<2)
#define FEATURE_NX		(1ULL<<3)
#define FEATURE_GT		(1ULL<<4)
#define FEATURE_IA		(1ULL<<6)
#define FEATURE_GA		(1ULL<<7)
#define FEATURE_HE		(1ULL<<8)
#define FEATURE_PC		(1ULL<<9)

#define FEATURE_PASID_SHIFT	32
#define FEATURE_PASID_MASK	(0x1fULL << FEATURE_PASID_SHIFT)

#define FEATURE_GLXVAL_SHIFT	14
#define FEATURE_GLXVAL_MASK	(0x03ULL << FEATURE_GLXVAL_SHIFT)

#define PASID_MASK		0x000fffff

/* MMIO status bits */
#define MMIO_STATUS_EVT_INT_MASK	(1 << 1)
#define MMIO_STATUS_COM_WAIT_INT_MASK	(1 << 2)
#define MMIO_STATUS_PPR_INT_MASK	(1 << 6)

/* event logging constants */
#define EVENT_ENTRY_SIZE	0x10
#define EVENT_TYPE_SHIFT	28
#define EVENT_TYPE_MASK		0xf
#define EVENT_TYPE_ILL_DEV	0x1
#define EVENT_TYPE_IO_FAULT	0x2
#define EVENT_TYPE_DEV_TAB_ERR	0x3
#define EVENT_TYPE_PAGE_TAB_ERR	0x4
#define EVENT_TYPE_ILL_CMD	0x5
#define EVENT_TYPE_CMD_HARD_ERR	0x6
#define EVENT_TYPE_IOTLB_INV_TO	0x7
#define EVENT_TYPE_INV_DEV_REQ	0x8
#define EVENT_DEVID_MASK	0xffff
#define EVENT_DEVID_SHIFT	0
#define EVENT_DOMID_MASK	0xffff
#define EVENT_DOMID_SHIFT	0
#define EVENT_FLAGS_MASK	0xfff
#define EVENT_FLAGS_SHIFT	0x10
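
/*
 * Illustrative sketch (not the driver's event handler): the shift/mask
 * pairs above pull the individual fields out of a raw event log entry.
 * Which 32-bit word of the 16-byte entry carries which field is an
 * assumption of this example, not something defined in this header.
 */
static inline void __event_decode_example(const u32 *event, u16 *devid,
					  u16 *domid, int *type, int *flags)
{
	*devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
	*type  = (event[1] >> EVENT_TYPE_SHIFT)  & EVENT_TYPE_MASK;
	*domid = (event[1] >> EVENT_DOMID_SHIFT) & EVENT_DOMID_MASK;
	*flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
}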

/* feature control bits */
#define CONTROL_IOMMU_EN	0x00ULL
#define CONTROL_HT_TUN_EN	0x01ULL
#define CONTROL_EVT_LOG_EN	0x02ULL
#define CONTROL_EVT_INT_EN	0x03ULL
#define CONTROL_COMWAIT_EN	0x04ULL
#define CONTROL_INV_TIMEOUT	0x05ULL
#define CONTROL_PASSPW_EN	0x08ULL
#define CONTROL_RESPASSPW_EN	0x09ULL
#define CONTROL_COHERENT_EN	0x0aULL
#define CONTROL_ISOC_EN		0x0bULL
#define CONTROL_CMDBUF_EN	0x0cULL
#define CONTROL_PPFLOG_EN	0x0dULL
#define CONTROL_PPFINT_EN	0x0eULL
#define CONTROL_PPR_EN		0x0fULL
#define CONTROL_GT_EN		0x10ULL

#define CTRL_INV_TO_MASK	(7 << CONTROL_INV_TIMEOUT)
#define CTRL_INV_TO_NONE	0
#define CTRL_INV_TO_1MS		1
#define CTRL_INV_TO_10MS	2
#define CTRL_INV_TO_100MS	3
#define CTRL_INV_TO_1S		4
#define CTRL_INV_TO_10S		5
#define CTRL_INV_TO_100S	6
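
/*
 * Illustrative sketch only: the CONTROL_* values above are bit positions in
 * the IOMMU control register, while CTRL_INV_TO_* are values for the
 * three-bit invalidation timeout field starting at CONTROL_INV_TIMEOUT.
 */
static inline u64 __control_bits_example(u64 ctrl)
{
	/* enable event logging: set a single control bit */
	ctrl |= 1ULL << CONTROL_EVT_LOG_EN;

	/* select a 10ms invalidation timeout: replace the whole field */
	ctrl &= ~(u64)CTRL_INV_TO_MASK;
	ctrl |= (u64)CTRL_INV_TO_10MS << CONTROL_INV_TIMEOUT;

	return ctrl;
}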

/* command specific defines */
#define CMD_COMPL_WAIT		0x01
#define CMD_INV_DEV_ENTRY	0x02
#define CMD_INV_IOMMU_PAGES	0x03
#define CMD_INV_IOTLB_PAGES	0x04
#define CMD_COMPLETE_PPR	0x07
#define CMD_INV_ALL		0x08

#define CMD_COMPL_WAIT_STORE_MASK	0x01
#define CMD_COMPL_WAIT_INT_MASK		0x02
#define CMD_INV_IOMMU_PAGES_SIZE_MASK	0x01
#define CMD_INV_IOMMU_PAGES_PDE_MASK	0x02
#define CMD_INV_IOMMU_PAGES_GN_MASK	0x04

#define PPR_STATUS_MASK			0xf
#define PPR_STATUS_SHIFT		12

#define CMD_INV_IOMMU_ALL_PAGES_ADDRESS	0x7fffffffffffffffULL

/* macros and definitions for device table entries */
#define DEV_ENTRY_VALID		0x00
#define DEV_ENTRY_TRANSLATION	0x01
#define DEV_ENTRY_IR		0x3d
#define DEV_ENTRY_IW		0x3e
#define DEV_ENTRY_NO_PAGE_FAULT	0x62
#define DEV_ENTRY_EX		0x67
#define DEV_ENTRY_SYSMGT1	0x68
#define DEV_ENTRY_SYSMGT2	0x69
#define DEV_ENTRY_INIT_PASS	0xb8
#define DEV_ENTRY_EINT_PASS	0xb9
#define DEV_ENTRY_NMI_PASS	0xba
#define DEV_ENTRY_LINT0_PASS	0xbe
#define DEV_ENTRY_LINT1_PASS	0xbf
#define DEV_ENTRY_MODE_MASK	0x07
#define DEV_ENTRY_MODE_SHIFT	0x09

/* constants to configure the command buffer */
#define CMD_BUFFER_SIZE		8192
#define CMD_BUFFER_UNINITIALIZED	1
#define CMD_BUFFER_ENTRIES	512
#define MMIO_CMD_SIZE_SHIFT	56
#define MMIO_CMD_SIZE_512	(0x9ULL << MMIO_CMD_SIZE_SHIFT)

/* constants for event buffer handling */
#define EVT_BUFFER_SIZE		8192 /* 512 entries */
#define EVT_LEN_MASK		(0x9ULL << 56)

/* Constants for PPR Log handling */
#define PPR_LOG_ENTRIES		512
#define PPR_LOG_SIZE_SHIFT	56
#define PPR_LOG_SIZE_512	(0x9ULL << PPR_LOG_SIZE_SHIFT)
#define PPR_ENTRY_SIZE		16
#define PPR_LOG_SIZE		(PPR_ENTRY_SIZE * PPR_LOG_ENTRIES)

#define PPR_REQ_TYPE(x)		(((x) >> 60) & 0xfULL)
#define PPR_FLAGS(x)		(((x) >> 48) & 0xfffULL)
#define PPR_DEVID(x)		((x) & 0xffffULL)
#define PPR_TAG(x)		(((x) >> 32) & 0x3ffULL)
#define PPR_PASID1(x)		(((x) >> 16) & 0xffffULL)
#define PPR_PASID2(x)		(((x) >> 42) & 0xfULL)
#define PPR_PASID(x)		((PPR_PASID2(x) << 16) | PPR_PASID1(x))

#define PPR_REQ_FAULT		0x01

#define PAGE_MODE_NONE		0x00
#define PAGE_MODE_1_LEVEL	0x01
#define PAGE_MODE_2_LEVEL	0x02
#define PAGE_MODE_3_LEVEL	0x03
#define PAGE_MODE_4_LEVEL	0x04
#define PAGE_MODE_5_LEVEL	0x05
#define PAGE_MODE_6_LEVEL	0x06

#define PM_LEVEL_SHIFT(x)	(12 + ((x) * 9))
#define PM_LEVEL_SIZE(x)	(((x) < 6) ? \
				  ((1ULL << PM_LEVEL_SHIFT((x))) - 1): \
				   (0xffffffffffffffffULL))
#define PM_LEVEL_INDEX(x, a)	(((a) >> PM_LEVEL_SHIFT((x))) & 0x1ffULL)
#define PM_LEVEL_ENC(x)		(((x) << 9) & 0xe00ULL)
#define PM_LEVEL_PDE(x, a)	((a) | PM_LEVEL_ENC((x)) | \
				 IOMMU_PTE_P | IOMMU_PTE_IR | IOMMU_PTE_IW)
#define PM_PTE_LEVEL(pte)	(((pte) >> 9) & 0x7ULL)

#define PM_MAP_4k		0
#define PM_ADDR_MASK		0x000ffffffffff000ULL
#define PM_MAP_MASK(lvl)	(PM_ADDR_MASK & \
				 (~((1ULL << (12 + ((lvl) * 9))) - 1)))
#define PM_ALIGNED(lvl, addr)	((PM_MAP_MASK(lvl) & (addr)) == (addr))

/*
 * Returns the page table level to use for a given page size
 * Pagesize is expected to be a power-of-two
 */
#define PAGE_SIZE_LEVEL(pagesize) \
		((__ffs(pagesize) - 12) / 9)
/*
 * Returns the number of ptes to use for a given page size
 * Pagesize is expected to be a power-of-two
 */
#define PAGE_SIZE_PTE_COUNT(pagesize) \
		(1ULL << ((__ffs(pagesize) - 12) % 9))

/*
 * Aligns a given io-virtual address to a given page size
 * Pagesize is expected to be a power-of-two
 */
#define PAGE_SIZE_ALIGN(address, pagesize) \
		((address) & ~((pagesize) - 1))
/*
 * Creates an IOMMU PTE for an address and a given pagesize
 * The PTE has no permission bits set
 * Pagesize is expected to be a power-of-two larger than 4096
 */
#define PAGE_SIZE_PTE(address, pagesize)		\
		(((address) | ((pagesize) - 1)) &	\
		 (~(pagesize >> 1)) & PM_ADDR_MASK)

/*
 * Takes a PTE value with mode=0x07 and returns the page size it maps
 */
#define PTE_PAGE_SIZE(pte) \
	(1ULL << (1 + ffz(((pte) | 0xfffULL))))
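
/*
 * Worked example for the page-size macros above (illustrative only): a
 * 2 MiB mapping at IO virtual address 0x40200000 gives
 *
 *	PAGE_SIZE_LEVEL(0x200000)           == 1	(one level above the 4k leaf level)
 *	PAGE_SIZE_PTE_COUNT(0x200000)       == 1	(a single entry covers it)
 *	PAGE_SIZE_PTE(0x40200000, 0x200000) == 0x402ff000
 *
 * i.e. address bits 12-19 are set and bit 20 is clear, so PTE_PAGE_SIZE()
 * finds the first zero bit at position 20 and recovers 1ULL << 21 == 2 MiB.
 */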

#define IOMMU_PTE_P	(1ULL << 0)
#define IOMMU_PTE_TV	(1ULL << 1)
#define IOMMU_PTE_U	(1ULL << 59)
#define IOMMU_PTE_FC	(1ULL << 60)
#define IOMMU_PTE_IR	(1ULL << 61)
#define IOMMU_PTE_IW	(1ULL << 62)

#define DTE_FLAG_IOTLB	(0x01UL << 32)
#define DTE_FLAG_GV	(0x01ULL << 55)
#define DTE_GLX_SHIFT	(56)
#define DTE_GLX_MASK	(3)

#define DTE_GCR3_VAL_A(x)	(((x) >> 12) & 0x00007ULL)
#define DTE_GCR3_VAL_B(x)	(((x) >> 15) & 0x0ffffULL)
#define DTE_GCR3_VAL_C(x)	(((x) >> 31) & 0xfffffULL)

#define DTE_GCR3_INDEX_A	0
#define DTE_GCR3_INDEX_B	1
#define DTE_GCR3_INDEX_C	1

#define DTE_GCR3_SHIFT_A	58
#define DTE_GCR3_SHIFT_B	16
#define DTE_GCR3_SHIFT_C	43

#define GCR3_VALID		0x01ULL

#define IOMMU_PAGE_MASK		(((1ULL << 52) - 1) & ~0xfffULL)
#define IOMMU_PTE_PRESENT(pte)	((pte) & IOMMU_PTE_P)
#define IOMMU_PTE_PAGE(pte)	(phys_to_virt((pte) & IOMMU_PAGE_MASK))
#define IOMMU_PTE_MODE(pte)	(((pte) >> 9) & 0x07)
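
/*
 * Example (illustrative only) of following one level of an IOMMU page
 * table with the accessors above: the entry covering 'iova' is read and,
 * if present, the walk descends to the next-level table:
 *
 *	u64 pte = pt[PM_LEVEL_INDEX(level, iova)];
 *
 *	if (IOMMU_PTE_PRESENT(pte))
 *		pt = IOMMU_PTE_PAGE(pte);
 */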

#define IOMMU_PROT_MASK 0x03
#define IOMMU_PROT_IR 0x01
#define IOMMU_PROT_IW 0x02

/* IOMMU capabilities */
#define IOMMU_CAP_IOTLB   24
#define IOMMU_CAP_NPCACHE 26
#define IOMMU_CAP_EFR     27

#define MAX_DOMAIN_ID 65536

/* FIXME: move this macro to <linux/pci.h> */
#define PCI_BUS(x) (((x) >> 8) & 0xff)

/* Protection domain flags */
#define PD_DMA_OPS_MASK		(1UL << 0) /* domain used for dma_ops */
#define PD_DEFAULT_MASK		(1UL << 1) /* domain is a default dma_ops
					      domain for an IOMMU */
#define PD_PASSTHROUGH_MASK	(1UL << 2) /* domain has no page
					      translation */
#define PD_IOMMUV2_MASK		(1UL << 3) /* domain has gcr3 table */

extern bool amd_iommu_dump;
#define DUMP_printk(format, arg...)					\
	do {								\
		if (amd_iommu_dump)					\
			printk(KERN_INFO "AMD-Vi: " format, ## arg);	\
	} while (0)
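
/*
 * Example use (illustrative): DUMP_printk("device table at %p\n", table);
 * only produces output when the amd_iommu_dump flag is set.
 */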

/* global flag if IOMMUs cache non-present entries */
extern bool amd_iommu_np_cache;
/* Only true if all IOMMUs support device IOTLBs */
extern bool amd_iommu_iotlb_sup;

/*
 * Make iterating over all IOMMUs easier
 */
#define for_each_iommu(iommu) \
	list_for_each_entry((iommu), &amd_iommu_list, list)
#define for_each_iommu_safe(iommu, next) \
	list_for_each_entry_safe((iommu), (next), &amd_iommu_list, list)
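
/*
 * Typical use (illustrative; 'enable_one_iommu' is a made-up callback):
 *
 *	struct amd_iommu *iommu;
 *
 *	for_each_iommu(iommu)
 *		enable_one_iommu(iommu);
 */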

#define APERTURE_RANGE_SHIFT	27	/* 128 MB */
#define APERTURE_RANGE_SIZE	(1ULL << APERTURE_RANGE_SHIFT)
#define APERTURE_RANGE_PAGES	(APERTURE_RANGE_SIZE >> PAGE_SHIFT)
#define APERTURE_MAX_RANGES	32	/* allows 4GB of DMA address space */
#define APERTURE_RANGE_INDEX(a)	((a) >> APERTURE_RANGE_SHIFT)
#define APERTURE_PAGE_INDEX(a)	(((a) >> 21) & 0x3fULL)


/*
 * This struct is used to pass information about
 * incoming PPR faults around.
 */
struct amd_iommu_fault {
	u64 address;    /* IO virtual address of the fault */
	u32 pasid;      /* Address space identifier */
	u16 device_id;  /* Originating PCI device id */
	u16 tag;        /* PPR tag */
	u16 flags;      /* Fault flags */

};
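
/*
 * Illustrative sketch (not the driver's handler): filling the fault
 * descriptor from a raw PPR log entry with the PPR_* accessors above.
 * The entry is assumed to be two 64-bit words, with the id/pasid/tag/flags
 * fields in the first word and the faulting address in the second; treat
 * that layout as an assumption of this example.
 */
static inline void __ppr_to_fault_example(const u64 *raw,
					  struct amd_iommu_fault *fault)
{
	fault->device_id = PPR_DEVID(raw[0]);
	fault->pasid     = PPR_PASID(raw[0]);
	fault->tag       = PPR_TAG(raw[0]);
	fault->flags     = PPR_FLAGS(raw[0]);
	fault->address   = raw[1];
}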

#define PPR_FAULT_EXEC	(1 << 1)
#define PPR_FAULT_READ	(1 << 2)
#define PPR_FAULT_WRITE	(1 << 5)
#define PPR_FAULT_USER	(1 << 6)
#define PPR_FAULT_RSVD	(1 << 7)
#define PPR_FAULT_GN	(1 << 8)

struct iommu_domain;

/*
 * This structure contains generic data for IOMMU protection domains
 * independent of their use.
 */
struct protection_domain {
	struct list_head list;  /* for list of all protection domains */
	struct list_head dev_list; /* List of all devices in this domain */
	spinlock_t lock;	/* mostly used to lock the page table */
	struct mutex api_lock;	/* protect page tables in the iommu-api path */
	u16 id;			/* the domain id written to the device table */
	int mode;		/* paging mode (0-6 levels) */
	u64 *pt_root;		/* page table root pointer */
	int glx;		/* Number of levels for GCR3 table */
	u64 *gcr3_tbl;		/* Guest CR3 table */
	unsigned long flags;	/* flags to find out type of domain */
	bool updated;		/* complete domain flush required */
	unsigned dev_cnt;	/* devices assigned to this domain */
	unsigned dev_iommu[MAX_IOMMUS]; /* per-IOMMU reference count */
	void *priv;		/* private data */
	struct iommu_domain *iommu_domain; /* Pointer to generic
					      domain structure */

};

/*
 * This struct contains device specific data for the IOMMU
 */
struct iommu_dev_data {
	struct list_head list;		  /* For domain->dev_list */
	struct list_head dev_data_list;	  /* For global dev_data_list */
	struct iommu_dev_data *alias_data;/* The alias dev_data */
	struct protection_domain *domain; /* Domain the device is bound to */
	atomic_t bind;			  /* Domain attach reference count */
	u16 devid;			  /* PCI Device ID */
	bool iommu_v2;			  /* Device can make use of IOMMUv2 */
	bool passthrough;		  /* Default for device is pt_domain */
	struct {
		bool enabled;
		int qdep;
	} ats;				  /* ATS state */
	bool pri_tlp;			  /* PASID TLB required for
					     PPR completions */
	u32 errata;			  /* Bitmap for errata to apply */
};

/*
 * For dynamic growth the aperture size is split into ranges of 128MB of
 * DMA address space each. This struct represents one such range.
 */
struct aperture_range {

	/* address allocation bitmap */
	unsigned long *bitmap;

	/*
	 * Array of PTE pages for the aperture. In this array we save all the
	 * leaf pages of the domain page table used for the aperture. This way
	 * we don't need to walk the page table to find a specific PTE. We can
	 * just calculate its address in constant time.
	 */
	u64 *pte_pages[64];

	unsigned long offset;
};
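
/*
 * Illustrative sketch (not the driver's lookup): with the cached leaf
 * pages above, the PTE for an address inside this 128 MB range can be
 * located without a page-table walk.  Each pte_pages[] slot covers 2 MB
 * (512 4k PTEs), which is the APERTURE_PAGE_INDEX() granularity.
 */
static inline u64 *__aperture_pte_sketch(struct aperture_range *range,
					 unsigned long iova)
{
	u64 *pte_page = range->pte_pages[APERTURE_PAGE_INDEX(iova)];

	if (!pte_page)
		return NULL;

	return &pte_page[PM_LEVEL_INDEX(0, iova)];
}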

/*
 * Data container for a dma_ops specific protection domain
 */
struct dma_ops_domain {
	struct list_head list;

	/* generic protection domain information */
	struct protection_domain domain;

	/* size of the aperture for the mappings */
	unsigned long aperture_size;

	/* address we start to search for free addresses */
	unsigned long next_address;

	/* address space relevant data */
	struct aperture_range *aperture[APERTURE_MAX_RANGES];

	/* This will be set to true when TLB needs to be flushed */
	bool need_flush;

	/*
	 * if this is a preallocated domain, keep the device for which it was
	 * preallocated in this variable
	 */
	u16 target_dev;
};

/*
 * Structure where we save information about one hardware AMD IOMMU in the
 * system.
 */
struct amd_iommu {
	struct list_head list;

	/* Index within the IOMMU array */
	int index;

	/* locks the accesses to the hardware */
	spinlock_t lock;

	/* Pointer to PCI device of this IOMMU */
	struct pci_dev *dev;

	/* Cache pdev to root device for resume quirks */
	struct pci_dev *root_pdev;

	/* physical address of MMIO space */
	u64 mmio_phys;
	/* virtual address of MMIO space */
	u8 *mmio_base;

	/* capabilities of that IOMMU read from ACPI */
	u32 cap;

	/* flags read from acpi table */
	u8 acpi_flags;

	/* Extended features */
	u64 features;

	/* IOMMUv2 */
	bool is_iommu_v2;

	/*
	 * Capability pointer. There could be more than one IOMMU per PCI
	 * device function if there is more than one AMD IOMMU capability
	 * pointer.
	 */
	u16 cap_ptr;

	/* pci domain of this IOMMU */
	u16 pci_seg;

	/* first device this IOMMU handles. read from PCI */
	u16 first_device;
	/* last device this IOMMU handles. read from PCI */
	u16 last_device;

	/* start of exclusion range of that IOMMU */
	u64 exclusion_start;
	/* length of exclusion range of that IOMMU */
	u64 exclusion_length;

	/* command buffer virtual address */
	u8 *cmd_buf;
	/* size of command buffer */
	u32 cmd_buf_size;

	/* size of event buffer */
	u32 evt_buf_size;
	/* event buffer virtual address */
	u8 *evt_buf;
	/* MSI number for event interrupt */
	u16 evt_msi_num;

	/* Base of the PPR log, if present */
	u8 *ppr_log;

	/* true if interrupts for this IOMMU are already enabled */
	bool int_enabled;

	/* if set, we need to send a completion wait command */
	bool need_sync;

	/* default dma_ops domain for that IOMMU */
	struct dma_ops_domain *default_dom;

	/*
	 * We can't rely on the BIOS to restore all values on reinit, so we
	 * need to stash them
	 */

	/* The iommu BAR */
	u32 stored_addr_lo;
	u32 stored_addr_hi;

	/*
	 * Each iommu has 6 l1s, each of which is documented as having 0x12
	 * registers
	 */
	u32 stored_l1[6][0x12];

	/* The l2 indirect registers */
	u32 stored_l2[0x83];
};
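
/*
 * Illustrative sketch (the driver has its own helper for this): testing an
 * extended feature bit on one IOMMU by pairing the FEATURE_* masks above
 * with the 'features' field.
 */
static inline bool __iommu_has_feature_example(struct amd_iommu *iommu, u64 mask)
{
	return !!(iommu->features & mask);
}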

/*
 * List with all IOMMUs in the system. This list is not locked because it is
 * only written and read at driver initialization or suspend time
 */
extern struct list_head amd_iommu_list;

/*
 * Array with pointers to each IOMMU struct
 * The indices are referenced in the protection domains
 */
extern struct amd_iommu *amd_iommus[MAX_IOMMUS];

/* Number of IOMMUs present in the system */
extern int amd_iommus_present;

/*
 * Declarations for the global list of all protection domains
 */
extern spinlock_t amd_iommu_pd_lock;
extern struct list_head amd_iommu_pd_list;

/*
 * Structure defining one entry in the device table
 */
struct dev_table_entry {
	u64 data[4];
};
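
/*
 * Illustrative sketch (not the driver's helper): the DEV_ENTRY_* values
 * defined earlier are bit positions within the 256-bit device table entry,
 * so setting one means picking the right 64-bit word and the bit within it.
 */
static inline void __dte_set_bit_example(struct dev_table_entry *dte, u8 bit)
{
	dte->data[(bit >> 6) & 0x3] |= 1ULL << (bit & 0x3f);
}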

/*
 * One entry for unity mappings parsed out of the ACPI table.
 */
struct unity_map_entry {
	struct list_head list;

	/* starting device id this entry is used for (inclusive) */
	u16 devid_start;
	/* end device id this entry is used for (inclusive) */
	u16 devid_end;

	/* start address to unity map (inclusive) */
	u64 address_start;
	/* end address to unity map (inclusive) */
	u64 address_end;

	/* required protection */
	int prot;
};

/*
 * List of all unity mappings. It is not locked because at runtime it is only
 * read. It is created at ACPI table parsing time.
 */
extern struct list_head amd_iommu_unity_map;

/*
 * Data structures for device handling
 */

/*
 * Device table used by hardware. Read and write accesses by software are
 * locked with the amd_iommu_pd_table lock.
 */
extern struct dev_table_entry *amd_iommu_dev_table;

/*
 * Alias table to map requestor ids to device ids. Not locked because only
 * read at runtime.
 */
extern u16 *amd_iommu_alias_table;

/*
 * Reverse lookup table to find the IOMMU which translates a specific device.
 */
extern struct amd_iommu **amd_iommu_rlookup_table;

/* size of the dma_ops aperture as power of 2 */
extern unsigned amd_iommu_aperture_order;

/* largest PCI device id we expect translation requests for */
extern u16 amd_iommu_last_bdf;

/* allocation bitmap for domain ids */
extern unsigned long *amd_iommu_pd_alloc_bitmap;

/*
 * If true, the addresses will be flushed at unmap time, not when
 * they are reused
 */
extern bool amd_iommu_unmap_flush;

/* Smallest number of PASIDs supported by any IOMMU in the system */
extern u32 amd_iommu_max_pasids;

extern bool amd_iommu_v2_present;

extern bool amd_iommu_force_isolation;

/* Max levels of glxval supported */
extern int amd_iommu_max_glx_val;

/*
 * Takes bus and device/function and returns the device id
 * FIXME: should that be in generic PCI code?
 */
static inline u16 calc_devid(u8 bus, u8 devfn)
{
	return (((u16)bus) << 8) | devfn;
}
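
/*
 * Example (illustrative): for bus 0x3f, device 2, function 1 the devfn byte
 * is 0x11, so calc_devid(0x3f, 0x11) yields 0x3f11 and PCI_BUS(0x3f11)
 * recovers the bus number 0x3f again.
 */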

#ifdef CONFIG_AMD_IOMMU_STATS

struct __iommu_counter {
	char *name;
	struct dentry *dent;
	u64 value;
};

#define DECLARE_STATS_COUNTER(nm) \
	static struct __iommu_counter nm = {	\
		.name = #nm,			\
	}

#define INC_STATS_COUNTER(name)		name.value += 1
#define ADD_STATS_COUNTER(name, x)	name.value += (x)
#define SUB_STATS_COUNTER(name, x)	name.value -= (x)

#else /* CONFIG_AMD_IOMMU_STATS */

#define DECLARE_STATS_COUNTER(name)
#define INC_STATS_COUNTER(name)
#define ADD_STATS_COUNTER(name, x)
#define SUB_STATS_COUNTER(name, x)

#endif /* CONFIG_AMD_IOMMU_STATS */
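
/*
 * Example use (illustrative; the counter name is made up): a translation
 * unit declares a counter once and bumps it on its hot path.  With
 * CONFIG_AMD_IOMMU_STATS disabled both macros expand to nothing.
 *
 *	DECLARE_STATS_COUNTER(cnt_example_events);
 *	...
 *	INC_STATS_COUNTER(cnt_example_events);
 */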

#endif /* _ASM_X86_AMD_IOMMU_TYPES_H */