1 /*
2  * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
3  * Rewrite, cleanup:
4  * Copyright (C) 2004 Olof Johansson <olof@lixom.net>, IBM Corporation
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation; either version 2 of the License, or
9  * (at your option) any later version.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License
17  * along with this program; if not, write to the Free Software
18  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
19  */
20 
21 #ifndef _ASM_IOMMU_H
22 #define _ASM_IOMMU_H
23 #ifdef __KERNEL__
24 
25 #include <linux/compiler.h>
26 #include <linux/spinlock.h>
27 #include <linux/device.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/bitops.h>
30 #include <asm/machdep.h>
31 #include <asm/types.h>
32 
33 #define IOMMU_PAGE_SHIFT      12
34 #define IOMMU_PAGE_SIZE       (ASM_CONST(1) << IOMMU_PAGE_SHIFT)
35 #define IOMMU_PAGE_MASK       (~((1 << IOMMU_PAGE_SHIFT) - 1))
36 #define IOMMU_PAGE_ALIGN(addr) _ALIGN_UP(addr, IOMMU_PAGE_SIZE)
37 
38 /* Boot time flags */
39 extern int iommu_is_off;
40 extern int iommu_force_on;
41 
42 /* Pure 2^n version of get_order */
get_iommu_order(unsigned long size)43 static __inline__ __attribute_const__ int get_iommu_order(unsigned long size)
44 {
45 	return __ilog2((size - 1) >> IOMMU_PAGE_SHIFT) + 1;
46 }
47 
48 
49 /*
50  * IOMAP_MAX_ORDER defines the largest contiguous block
51  * of dma space we can get.  IOMAP_MAX_ORDER = 13
52  * allows up to 2**12 pages (4096 * 4096) = 16 MB
53  */
54 #define IOMAP_MAX_ORDER		13
55 
/*
 * Describes one TCE (translation control entry) table: a range of DMA
 * space plus the bitmap-allocator state used to hand out pages from it.
 * it_map is protected by it_lock.
 */
struct iommu_table {
	unsigned long  it_busno;     /* Bus number this table belongs to */
	unsigned long  it_size;      /* Size of iommu table in entries */
	unsigned long  it_offset;    /* Offset into global table */
	unsigned long  it_base;      /* mapped address of tce table */
	unsigned long  it_index;     /* which iommu table this is */
	unsigned long  it_type;      /* type: PCI or Virtual Bus */
	unsigned long  it_blocksize; /* Entries in each block (cacheline) */
	unsigned long  it_hint;      /* Hint for next alloc */
	unsigned long  it_largehint; /* Hint for large allocs */
	unsigned long  it_halfpoint; /* Breaking point for small/large allocs */
	spinlock_t     it_lock;      /* Protects it_map */
	unsigned long *it_map;       /* A simple allocation bitmap for now */
};
70 
71 struct scatterlist;
72 
/*
 * Record the iommu table for @dev in its arch-specific device data so it
 * can later be fetched with get_iommu_table_base().  @base is stored as a
 * void * but is expected to point at a struct iommu_table.
 */
static inline void set_iommu_table_base(struct device *dev, void *base)
{
	dev->archdata.dma_data.iommu_table_base = base;
}
77 
/*
 * Return the iommu table pointer previously stored for @dev by
 * set_iommu_table_base(), or whatever value the field currently holds
 * (NULL if it was never set — callers must be prepared for that).
 */
static inline void *get_iommu_table_base(struct device *dev)
{
	return dev->archdata.dma_data.iommu_table_base;
}
82 
83 /* Frees table for an individual device node */
84 extern void iommu_free_table(struct iommu_table *tbl, const char *node_name);
85 
86 /* Initializes an iommu_table based in values set in the passed-in
87  * structure
88  */
89 extern struct iommu_table *iommu_init_table(struct iommu_table * tbl,
90 					    int nid);
91 
92 extern int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
93 			struct scatterlist *sglist, int nelems,
94 			unsigned long mask, enum dma_data_direction direction,
95 			struct dma_attrs *attrs);
96 extern void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
97 			   int nelems, enum dma_data_direction direction,
98 			   struct dma_attrs *attrs);
99 
100 extern void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
101 				  size_t size, dma_addr_t *dma_handle,
102 				  unsigned long mask, gfp_t flag, int node);
103 extern void iommu_free_coherent(struct iommu_table *tbl, size_t size,
104 				void *vaddr, dma_addr_t dma_handle);
105 extern dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
106 				 struct page *page, unsigned long offset,
107 				 size_t size, unsigned long mask,
108 				 enum dma_data_direction direction,
109 				 struct dma_attrs *attrs);
110 extern void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
111 			     size_t size, enum dma_data_direction direction,
112 			     struct dma_attrs *attrs);
113 
114 extern void iommu_init_early_pSeries(void);
115 extern void iommu_init_early_iSeries(void);
116 extern void iommu_init_early_dart(void);
117 extern void iommu_init_early_pasemi(void);
118 
119 #ifdef CONFIG_PCI
120 extern void pci_iommu_init(void);
121 extern void pci_direct_iommu_init(void);
122 #else
/* Stub: with CONFIG_PCI disabled there is no PCI IOMMU to initialize. */
static inline void pci_iommu_init(void) { }
124 #endif
125 
126 extern void alloc_dart_table(void);
127 #if defined(CONFIG_PPC64) && defined(CONFIG_PM)
iommu_save(void)128 static inline void iommu_save(void)
129 {
130 	if (ppc_md.iommu_save)
131 		ppc_md.iommu_save();
132 }
133 
iommu_restore(void)134 static inline void iommu_restore(void)
135 {
136 	if (ppc_md.iommu_restore)
137 		ppc_md.iommu_restore();
138 }
139 #endif
140 
141 #endif /* __KERNEL__ */
142 #endif /* _ASM_IOMMU_H */
143