/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __IO_PGTABLE_H
#define __IO_PGTABLE_H

#include <linux/bitops.h>
#include <linux/iommu.h>

/*
 * Public API for use by IOMMU drivers
 */
enum io_pgtable_fmt {
	ARM_32_LPAE_S1,
	ARM_32_LPAE_S2,
	ARM_64_LPAE_S1,
	ARM_64_LPAE_S2,
	ARM_V7S,
	ARM_MALI_LPAE,
	AMD_IOMMU_V1,
	AMD_IOMMU_V2,
	APPLE_DART,
	APPLE_DART2,
	IO_PGTABLE_NUM_FMTS,
};

/**
 * struct iommu_flush_ops - IOMMU callbacks for TLB and page table management.
 *
 * @tlb_flush_all:  Synchronously invalidate the entire TLB context.
 * @tlb_flush_walk: Synchronously invalidate all intermediate TLB state
 *                  (sometimes referred to as the "walk cache") for a virtual
 *                  address range.
 * @tlb_add_page:   Optional callback to queue up leaf TLB invalidation for a
 *                  single page.  IOMMUs that cannot batch TLB invalidation
 *                  operations efficiently will typically issue them here, but
 *                  others may decide to update the iommu_iotlb_gather structure
 *                  and defer the invalidation until iommu_iotlb_sync() instead.
 *
 * Note that these can all be called in atomic context and must therefore
 * not block.
 */
struct iommu_flush_ops {
	void (*tlb_flush_all)(void *cookie);
	void (*tlb_flush_walk)(unsigned long iova, size_t size, size_t granule,
			       void *cookie);
	void (*tlb_add_page)(struct iommu_iotlb_gather *gather,
			     unsigned long iova, size_t granule, void *cookie);
};
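
/*
 * A minimal sketch of a driver supplying these callbacks; the my_tlb_* and
 * my_hw_inv_* names are hypothetical and stand in for whatever invalidation
 * commands the hardware actually provides:
 *
 *	static void my_tlb_flush_all(void *cookie)
 *	{
 *		my_hw_inv_context(cookie);
 *	}
 *
 *	static void my_tlb_flush_walk(unsigned long iova, size_t size,
 *				      size_t granule, void *cookie)
 *	{
 *		my_hw_inv_range(cookie, iova, size, granule);
 *	}
 *
 *	static const struct iommu_flush_ops my_flush_ops = {
 *		.tlb_flush_all	= my_tlb_flush_all,
 *		.tlb_flush_walk	= my_tlb_flush_walk,
 *	};
 */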

/**
 * struct io_pgtable_cfg - Configuration data for a set of page tables.
 *
 * @quirks:        A bitmap of hardware quirks that require some special
 *                 action by the low-level page table allocator.
 * @pgsize_bitmap: A bitmap of page sizes supported by this set of page
 *                 tables.
 * @ias:           Input address (iova) size, in bits.
 * @oas:           Output address (paddr) size, in bits.
 * @coherent_walk: A flag to indicate whether or not page table walks made
 *                 by the IOMMU are coherent with the CPU caches.
 * @tlb:           TLB management callbacks for this set of tables.
 * @iommu_dev:     The device representing the DMA configuration for the
 *                 page table walker.
 */
struct io_pgtable_cfg {
	/*
	 * IO_PGTABLE_QUIRK_ARM_NS: (ARM formats) Set NS and NSTABLE bits in
	 *	stage 1 PTEs, for hardware which insists on validating them
	 *	even in non-secure state where they should normally be ignored.
	 *
	 * IO_PGTABLE_QUIRK_NO_PERMS: Ignore the IOMMU_READ, IOMMU_WRITE and
	 *	IOMMU_NOEXEC flags and map everything with full access, for
	 *	hardware which does not implement the permissions of a given
	 *	format, and/or requires some format-specific default value.
	 *
	 * IO_PGTABLE_QUIRK_ARM_MTK_EXT: (ARM v7s format) MediaTek IOMMUs extend
	 *	the PTE format to support up to 35 bits of PA, with PA bits 32,
	 *	33 and 34 encoded in PTE bits 9, 4 and 5 respectively.
	 *
	 * IO_PGTABLE_QUIRK_ARM_MTK_TTBR_EXT: (ARM v7s format) MediaTek IOMMUs
	 *	extend the translation table base to support up to 35 bits of
	 *	PA; the encoding is the same as for IO_PGTABLE_QUIRK_ARM_MTK_EXT.
	 *
	 * IO_PGTABLE_QUIRK_ARM_TTBR1: (ARM LPAE format) Configure the table
	 *	for use in the upper half of a split address space.
	 *
	 * IO_PGTABLE_QUIRK_ARM_OUTER_WBWA: Override the outer-cacheability
	 *	attributes set in the TCR for a non-coherent page-table walker.
	 */
	#define IO_PGTABLE_QUIRK_ARM_NS			BIT(0)
	#define IO_PGTABLE_QUIRK_NO_PERMS		BIT(1)
	#define IO_PGTABLE_QUIRK_ARM_MTK_EXT		BIT(3)
	#define IO_PGTABLE_QUIRK_ARM_MTK_TTBR_EXT	BIT(4)
	#define IO_PGTABLE_QUIRK_ARM_TTBR1		BIT(5)
	#define IO_PGTABLE_QUIRK_ARM_OUTER_WBWA		BIT(6)
	unsigned long			quirks;
	unsigned long			pgsize_bitmap;
	unsigned int			ias;
	unsigned int			oas;
	bool				coherent_walk;
	const struct iommu_flush_ops	*tlb;
	struct device			*iommu_dev;

	/* Low-level data specific to the table format */
	union {
		struct {
			u64	ttbr;
			struct {
				u32	ips:3;
				u32	tg:2;
				u32	sh:2;
				u32	orgn:2;
				u32	irgn:2;
				u32	tsz:6;
			}	tcr;
			u64	mair;
		} arm_lpae_s1_cfg;

		struct {
			u64	vttbr;
			struct {
				u32	ps:3;
				u32	tg:2;
				u32	sh:2;
				u32	orgn:2;
				u32	irgn:2;
				u32	sl:2;
				u32	tsz:6;
			}	vtcr;
		} arm_lpae_s2_cfg;

		struct {
			u32	ttbr;
			u32	tcr;
			u32	nmrr;
			u32	prrr;
		} arm_v7s_cfg;

		struct {
			u64	transtab;
			u64	memattr;
		} arm_mali_lpae_cfg;

		struct {
			u64 ttbr[4];
			u32 n_ttbrs;
		} apple_dart_cfg;
	};
};
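
/*
 * A minimal sketch of how an IOMMU driver might populate the configuration
 * before handing it to alloc_io_pgtable_ops(); the address sizes, page sizes
 * and "smmu" device shown here are purely illustrative:
 *
 *	struct io_pgtable_cfg cfg = {
 *		.pgsize_bitmap	= SZ_4K | SZ_2M | SZ_1G,
 *		.ias		= 48,
 *		.oas		= 48,
 *		.coherent_walk	= true,
 *		.tlb		= &my_flush_ops,
 *		.iommu_dev	= smmu->dev,
 *	};
 */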

/**
 * struct io_pgtable_ops - Page table manipulation API for IOMMU drivers.
 *
 * @map:          Map a physically contiguous memory region.
 * @map_pages:    Map a physically contiguous range of pages of the same size.
 * @unmap:        Unmap a physically contiguous memory region.
 * @unmap_pages:  Unmap a range of virtually contiguous pages of the same size.
 * @iova_to_phys: Translate iova to physical address.
 *
 * These functions map directly onto the iommu_ops member functions with
 * the same names.
 */
struct io_pgtable_ops {
	int (*map)(struct io_pgtable_ops *ops, unsigned long iova,
		   phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
	int (*map_pages)(struct io_pgtable_ops *ops, unsigned long iova,
			 phys_addr_t paddr, size_t pgsize, size_t pgcount,
			 int prot, gfp_t gfp, size_t *mapped);
	size_t (*unmap)(struct io_pgtable_ops *ops, unsigned long iova,
			size_t size, struct iommu_iotlb_gather *gather);
	size_t (*unmap_pages)(struct io_pgtable_ops *ops, unsigned long iova,
			      size_t pgsize, size_t pgcount,
			      struct iommu_iotlb_gather *gather);
	phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *ops,
				    unsigned long iova);
};
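
/*
 * Illustrative use of the returned ops, with error handling trimmed; "iova",
 * "paddr" and "gather" are assumed to come from the caller:
 *
 *	size_t mapped = 0;
 *
 *	ret = ops->map_pages(ops, iova, paddr, SZ_4K, 16,
 *			     IOMMU_READ | IOMMU_WRITE, GFP_KERNEL, &mapped);
 *	...
 *	unmapped = ops->unmap_pages(ops, iova, SZ_4K, 16, &gather);
 */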

/**
 * alloc_io_pgtable_ops() - Allocate a page table allocator for use by an IOMMU.
 *
 * @fmt:    The page table format.
 * @cfg:    The page table configuration. This will be modified to represent
 *          the configuration actually provided by the allocator (e.g. the
 *          pgsize_bitmap may be restricted).
 * @cookie: An opaque token provided by the IOMMU driver and passed back to
 *          the callback routines in cfg->tlb.
 */
struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt,
					    struct io_pgtable_cfg *cfg,
					    void *cookie);
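
/*
 * Returns NULL on failure. A typical call site might look like the following
 * sketch, where "smmu_domain" is a hypothetical per-domain cookie:
 *
 *	pgtbl_ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1, &cfg, smmu_domain);
 *	if (!pgtbl_ops)
 *		return -ENOMEM;
 */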

/**
 * free_io_pgtable_ops() - Free an io_pgtable_ops structure. The caller
 *                         *must* ensure that the page table is no longer
 *                         live, but the TLB can be dirty.
 *
 * @ops: The ops returned from alloc_io_pgtable_ops.
 */
void free_io_pgtable_ops(struct io_pgtable_ops *ops);
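
/*
 * For example (sketch, with a hypothetical helper): the driver quiesces the
 * hardware so that no further walks of these tables can occur, then frees
 * them without needing to flush the TLB first:
 *
 *	my_hw_disable_translation(smmu_domain);
 *	free_io_pgtable_ops(smmu_domain->pgtbl_ops);
 */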


/*
 * Internal structures for page table allocator implementations.
 */

/**
 * struct io_pgtable - Internal structure describing a set of page tables.
 *
 * @fmt:    The page table format.
 * @cookie: An opaque token provided by the IOMMU driver and passed back to
 *          any callback routines.
 * @cfg:    A copy of the page table configuration.
 * @ops:    The page table operations in use for this set of page tables.
 */
struct io_pgtable {
	enum io_pgtable_fmt	fmt;
	void			*cookie;
	struct io_pgtable_cfg	cfg;
	struct io_pgtable_ops	ops;
};

#define io_pgtable_ops_to_pgtable(x) container_of((x), struct io_pgtable, ops)
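
/*
 * Format implementations embed a struct io_pgtable in their private data and
 * recover it from the ops pointer passed to each callback, e.g. (sketch with
 * a hypothetical format):
 *
 *	static phys_addr_t my_fmt_iova_to_phys(struct io_pgtable_ops *ops,
 *					       unsigned long iova)
 *	{
 *		struct io_pgtable *iop = io_pgtable_ops_to_pgtable(ops);
 *		struct io_pgtable_cfg *cfg = &iop->cfg;
 *		...
 *	}
 */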

static inline void io_pgtable_tlb_flush_all(struct io_pgtable *iop)
{
	if (iop->cfg.tlb && iop->cfg.tlb->tlb_flush_all)
		iop->cfg.tlb->tlb_flush_all(iop->cookie);
}

static inline void
io_pgtable_tlb_flush_walk(struct io_pgtable *iop, unsigned long iova,
			  size_t size, size_t granule)
{
	if (iop->cfg.tlb && iop->cfg.tlb->tlb_flush_walk)
		iop->cfg.tlb->tlb_flush_walk(iova, size, granule, iop->cookie);
}

static inline void
io_pgtable_tlb_add_page(struct io_pgtable *iop,
			struct iommu_iotlb_gather *gather, unsigned long iova,
			size_t granule)
{
	if (iop->cfg.tlb && iop->cfg.tlb->tlb_add_page)
		iop->cfg.tlb->tlb_add_page(gather, iova, granule, iop->cookie);
}
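
/*
 * Format implementations call these wrappers rather than cfg->tlb directly so
 * that a missing callback is silently skipped. As a sketch, tearing down a
 * table of intermediate entries might be followed by:
 *
 *	io_pgtable_tlb_flush_walk(iop, iova, size, granule);
 *
 * whereas unmapping individual leaf entries would queue invalidations with
 * io_pgtable_tlb_add_page() using the caller's iommu_iotlb_gather.
 */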

/**
 * struct io_pgtable_init_fns - Alloc/free a set of page tables for a
 *                              particular format.
 *
 * @alloc: Allocate a set of page tables described by cfg.
 * @free:  Free the page tables associated with iop.
 */
struct io_pgtable_init_fns {
	struct io_pgtable *(*alloc)(struct io_pgtable_cfg *cfg, void *cookie);
	void (*free)(struct io_pgtable *iop);
};
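
/*
 * Each supported format provides one of these; a hypothetical format would
 * supply its allocator roughly as follows (sketch):
 *
 *	struct io_pgtable_init_fns io_pgtable_my_fmt_init_fns = {
 *		.alloc	= my_fmt_alloc_pgtable,
 *		.free	= my_fmt_free_pgtable,
 *	};
 */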

extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_v7s_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_mali_lpae_init_fns;
extern struct io_pgtable_init_fns io_pgtable_amd_iommu_v1_init_fns;
extern struct io_pgtable_init_fns io_pgtable_amd_iommu_v2_init_fns;
extern struct io_pgtable_init_fns io_pgtable_apple_dart_init_fns;

#endif /* __IO_PGTABLE_H */