// SPDX-License-Identifier: GPL-2.0-only
/*
 * CPU-agnostic AMD IO page table v2 allocator.
 *
 * Copyright (C) 2022 Advanced Micro Devices, Inc.
 * Author: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
 * Author: Vasant Hegde <vasant.hegde@amd.com>
 */

#define pr_fmt(fmt)	"AMD-Vi: " fmt
#define dev_fmt(fmt)	pr_fmt(fmt)

#include <linux/bitops.h>
#include <linux/io-pgtable.h>
#include <linux/kernel.h>

#include <asm/barrier.h>

#include "amd_iommu_types.h"
#include "amd_iommu.h"

#define IOMMU_PAGE_PRESENT	BIT_ULL(0)	/* Is present */
#define IOMMU_PAGE_RW		BIT_ULL(1)	/* Writeable */
#define IOMMU_PAGE_USER		BIT_ULL(2)	/* Userspace addressable */
#define IOMMU_PAGE_PWT		BIT_ULL(3)	/* Page write through */
#define IOMMU_PAGE_PCD		BIT_ULL(4)	/* Page cache disabled */
#define IOMMU_PAGE_ACCESS	BIT_ULL(5)	/* Was accessed (updated by IOMMU) */
#define IOMMU_PAGE_DIRTY	BIT_ULL(6)	/* Was written to (updated by IOMMU) */
#define IOMMU_PAGE_PSE		BIT_ULL(7)	/* Page Size Extensions */
#define IOMMU_PAGE_NX		BIT_ULL(63)	/* No execute */

#define MAX_PTRS_PER_PAGE	512

#define IOMMU_PAGE_SIZE_2M	BIT_ULL(21)
#define IOMMU_PAGE_SIZE_1G	BIT_ULL(30)

static inline int get_pgtable_level(void)
{
	/* 5 level page table is not supported */
	return PAGE_MODE_4_LEVEL;
}

static inline bool is_large_pte(u64 pte)
{
	return (pte & IOMMU_PAGE_PSE);
}

static inline void *alloc_pgtable_page(void)
{
	return (void *)get_zeroed_page(GFP_KERNEL);
}

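/*
 * Encode a non-leaf entry: the physical address of the next-level table
 * plus the present, writable, user, accessed and dirty bits.
 */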
static inline u64 set_pgtable_attr(u64 *page)
{
	u64 prot;

	prot = IOMMU_PAGE_PRESENT | IOMMU_PAGE_RW | IOMMU_PAGE_USER;
	prot |= IOMMU_PAGE_ACCESS | IOMMU_PAGE_DIRTY;

	return (iommu_virt_to_phys(page) | prot);
}

/* Return the virtual address of the next-level table that @pte points to */
static inline void *get_pgtable_pte(u64 pte)
{
	return iommu_phys_to_virt(pte & PM_ADDR_MASK);
}

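/*
 * Encode a leaf PTE for @paddr. PSE marks 2M/1G entries as large pages;
 * write access is granted only when IOMMU_PROT_IW is requested.
 */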
static u64 set_pte_attr(u64 paddr, u64 pg_size, int prot)
{
	u64 pte;

	pte = __sme_set(paddr & PM_ADDR_MASK);
	pte |= IOMMU_PAGE_PRESENT | IOMMU_PAGE_USER;
	pte |= IOMMU_PAGE_ACCESS | IOMMU_PAGE_DIRTY;

	if (prot & IOMMU_PROT_IW)
		pte |= IOMMU_PAGE_RW;

	/* Large page */
	if (pg_size == IOMMU_PAGE_SIZE_1G || pg_size == IOMMU_PAGE_SIZE_2M)
		pte |= IOMMU_PAGE_PSE;

	return pte;
}

/* Return the largest IOMMU page size (4K/2M/1G) that fits in @size */
static inline u64 get_alloc_page_size(u64 size)
{
	if (size >= IOMMU_PAGE_SIZE_1G)
		return IOMMU_PAGE_SIZE_1G;

	if (size >= IOMMU_PAGE_SIZE_2M)
		return IOMMU_PAGE_SIZE_2M;

	return PAGE_SIZE;
}

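/*
 * Return the page table level at which the walk for a leaf of @pg_size
 * terminates (used as the end level by v2_alloc_pte()).
 */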
static inline int page_size_to_level(u64 pg_size)
{
	if (pg_size == IOMMU_PAGE_SIZE_1G)
		return PAGE_MODE_3_LEVEL;
	if (pg_size == IOMMU_PAGE_SIZE_2M)
		return PAGE_MODE_2_LEVEL;

	return PAGE_MODE_1_LEVEL;
}

static inline void free_pgtable_page(u64 *pt)
{
	free_page((unsigned long)pt);
}

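/*
 * Recursively free a (sub-)page table rooted at @pt. @level is the
 * number of table levels remaining, including @pt itself. Large-page
 * leaf PTEs have no lower-level table and are skipped.
 */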
static void free_pgtable(u64 *pt, int level)
{
	u64 *p;
	int i;

	for (i = 0; i < MAX_PTRS_PER_PAGE; i++) {
		/* PTE present? */
		if (!IOMMU_PTE_PRESENT(pt[i]))
			continue;

		if (is_large_pte(pt[i]))
			continue;

		/*
		 * Free the next level. No need to look at l1 tables here since
		 * they can only contain leaf PTEs; just free them directly.
		 */
		p = get_pgtable_pte(pt[i]);
		if (level > 2)
			free_pgtable(p, level - 1);
		else
			free_pgtable_page(p);
	}

	free_pgtable_page(pt);
}

/*
 * Allocate page table. Walk from the PGD down to the level that maps
 * @pg_size, allocating missing intermediate tables on the way. Racing
 * walkers are resolved with cmpxchg64(): the loser frees its page and
 * retries. Returns the PTE slot for @iova, or NULL on allocation failure.
 */
static u64 *v2_alloc_pte(u64 *pgd, unsigned long iova,
			 unsigned long pg_size, bool *updated)
{
	u64 *pte, *page;
	int level, end_level;

	level = get_pgtable_level() - 1;
	end_level = page_size_to_level(pg_size);
	pte = &pgd[PM_LEVEL_INDEX(level, iova)];
	iova = PAGE_SIZE_ALIGN(iova, PAGE_SIZE);

	while (level >= end_level) {
		u64 __pte, __npte;

		__pte = *pte;

		if (IOMMU_PTE_PRESENT(__pte) && is_large_pte(__pte)) {
			/* Unmap large pte and retry the walk at this level */
			cmpxchg64(pte, *pte, 0ULL);
			*updated = true;
			continue;
		}

		if (!IOMMU_PTE_PRESENT(__pte)) {
			page = alloc_pgtable_page();
			if (!page)
				return NULL;

			__npte = set_pgtable_attr(page);
			/* pte could have been changed somewhere. */
			if (cmpxchg64(pte, __pte, __npte) != __pte)
				free_pgtable_page(page);
			else if (IOMMU_PTE_PRESENT(__pte))
				*updated = true;

			continue;
		}

		level -= 1;
		pte = get_pgtable_pte(__pte);
		pte = &pte[PM_LEVEL_INDEX(level, iova)];
	}

	/* Tear down existing pte entries */
	if (IOMMU_PTE_PRESENT(*pte)) {
		u64 *__pte;

		*updated = true;
		__pte = get_pgtable_pte(*pte);
		cmpxchg64(pte, *pte, 0ULL);
		if (pg_size == IOMMU_PAGE_SIZE_1G)
			free_pgtable(__pte, end_level - 1);
		else if (pg_size == IOMMU_PAGE_SIZE_2M)
			free_pgtable_page(__pte);
	}

	return pte;
}

/*
 * This function checks if there is a PTE for a given dma address.
 * If there is one, it returns the pointer to it and sets *page_size to
 * the page size of that mapping. Otherwise NULL is returned.
 */
static u64 *fetch_pte(struct amd_io_pgtable *pgtable,
		      unsigned long iova, unsigned long *page_size)
{
	u64 *pte;
	int level;

	level = get_pgtable_level() - 1;
	pte = &pgtable->pgd[PM_LEVEL_INDEX(level, iova)];
	/* Default page size is 4K */
	*page_size = PAGE_SIZE;

	while (level) {
		/* Not present */
		if (!IOMMU_PTE_PRESENT(*pte))
			return NULL;

		/* Walk to the next level */
		pte = get_pgtable_pte(*pte);
		pte = &pte[PM_LEVEL_INDEX(level - 1, iova)];

		/* Large page */
		if (is_large_pte(*pte)) {
			if (level == PAGE_MODE_3_LEVEL)
				*page_size = IOMMU_PAGE_SIZE_1G;
			else if (level == PAGE_MODE_2_LEVEL)
				*page_size = IOMMU_PAGE_SIZE_2M;
			else
				return NULL;	/* Wrongly set PSE bit in PTE */

			break;
		}

		level -= 1;
	}

	return pte;
}

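/*
 * Map @pgcount pages of size @pgsize starting at @iova. Each iteration
 * installs the largest IOMMU page size (4K/2M/1G) that fits in @pgsize.
 * If an existing mapping was replaced along the way, the stale IOTLB
 * entries are flushed before returning.
 */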
static int iommu_v2_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
			      phys_addr_t paddr, size_t pgsize, size_t pgcount,
			      int prot, gfp_t gfp, size_t *mapped)
{
	struct protection_domain *pdom = io_pgtable_ops_to_domain(ops);
	struct io_pgtable_cfg *cfg = &pdom->iop.iop.cfg;
	u64 *pte;
	unsigned long map_size;
	unsigned long mapped_size = 0;
	unsigned long o_iova = iova;
	size_t size = pgcount << __ffs(pgsize);
	int count = 0;
	int ret = 0;
	bool updated = false;

	if (WARN_ON(!pgsize || (pgsize & cfg->pgsize_bitmap) != pgsize) || !pgcount)
		return -EINVAL;

	if (!(prot & IOMMU_PROT_MASK))
		return -EINVAL;

	while (mapped_size < size) {
		map_size = get_alloc_page_size(pgsize);
		pte = v2_alloc_pte(pdom->iop.pgd, iova, map_size, &updated);
		if (!pte) {
			ret = -EINVAL;
			goto out;
		}

		*pte = set_pte_attr(paddr, map_size, prot);

		count++;
		iova += map_size;
		paddr += map_size;
		mapped_size += map_size;
	}

out:
	if (updated) {
		if (count > 1)
			amd_iommu_flush_tlb(&pdom->domain, 0);
		else
			amd_iommu_flush_page(&pdom->domain, 0, o_iova);
	}

	if (mapped)
		*mapped += mapped_size;

	return ret;
}

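/*
 * Unmap up to @pgcount pages of size @pgsize starting at @iova. Leaf
 * PTEs are simply cleared; intermediate tables are left in place.
 * Returns the number of bytes unmapped, which may be short of the
 * requested size if an unmapped hole is encountered.
 */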
static unsigned long iommu_v2_unmap_pages(struct io_pgtable_ops *ops,
					  unsigned long iova,
					  size_t pgsize, size_t pgcount,
					  struct iommu_iotlb_gather *gather)
{
	struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &pgtable->iop.cfg;
	unsigned long unmap_size;
	unsigned long unmapped = 0;
	size_t size = pgcount << __ffs(pgsize);
	u64 *pte;

	if (WARN_ON(!pgsize || (pgsize & cfg->pgsize_bitmap) != pgsize || !pgcount))
		return 0;

	while (unmapped < size) {
		pte = fetch_pte(pgtable, iova, &unmap_size);
		if (!pte)
			return unmapped;

		*pte = 0ULL;

		iova = (iova & ~(unmap_size - 1)) + unmap_size;
		unmapped += unmap_size;
	}

	return unmapped;
}

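/* Translate an IOVA to the physical address it maps to, or 0 if unmapped */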
static phys_addr_t iommu_v2_iova_to_phys(struct io_pgtable_ops *ops, unsigned long iova)
{
	struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
	unsigned long offset_mask, pte_pgsize;
	u64 *pte, __pte;

	pte = fetch_pte(pgtable, iova, &pte_pgsize);
	if (!pte || !IOMMU_PTE_PRESENT(*pte))
		return 0;

	offset_mask = pte_pgsize - 1;
	__pte = __sme_clr(*pte & PM_ADDR_MASK);

	return (__pte & ~offset_mask) | (iova & offset_mask);
}

/*
 * ----------------------------------------------------
 */
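/*
 * The AMD IOMMU driver issues its own IOTLB invalidations (see
 * iommu_v2_map_pages() above), so the io-pgtable TLB callbacks below
 * are left as no-ops.
 */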
static void v2_tlb_flush_all(void *cookie)
{
}

static void v2_tlb_flush_walk(unsigned long iova, size_t size,
			      size_t granule, void *cookie)
{
}

static void v2_tlb_add_page(struct iommu_iotlb_gather *gather,
			    unsigned long iova, size_t granule,
			    void *cookie)
{
}

static const struct iommu_flush_ops v2_flush_ops = {
	.tlb_flush_all	= v2_tlb_flush_all,
	.tlb_flush_walk	= v2_tlb_flush_walk,
	.tlb_add_page	= v2_tlb_add_page,
};

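/*
 * Tear down a v2 page table: propagate the update to the IOMMUs, then
 * free every level of the table.
 */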
static void v2_free_pgtable(struct io_pgtable *iop)
{
	struct protection_domain *pdom;
	struct amd_io_pgtable *pgtable = container_of(iop, struct amd_io_pgtable, iop);

	pdom = container_of(pgtable, struct protection_domain, iop);
	if (!(pdom->flags & PD_IOMMUV2_MASK))
		return;

	/*
	 * Make changes visible to IOMMUs. No need to clear gcr3 entry
	 * as gcr3 table is already freed.
	 */
	amd_iommu_domain_update(pdom);

	/* Free page table */
	free_pgtable(pgtable->pgd, get_pgtable_level());
}

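/*
 * Allocate a v2 page table: allocate the PGD, install it in the
 * domain's GCR3 table (PASID 0) and hook up the io-pgtable ops.
 */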
static struct io_pgtable *v2_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct amd_io_pgtable *pgtable = io_pgtable_cfg_to_data(cfg);
	struct protection_domain *pdom = (struct protection_domain *)cookie;
	int ret;

	pgtable->pgd = alloc_pgtable_page();
	if (!pgtable->pgd)
		return NULL;

	ret = amd_iommu_domain_set_gcr3(&pdom->domain, 0, iommu_virt_to_phys(pgtable->pgd));
	if (ret)
		goto err_free_pgd;

	pgtable->iop.ops.map_pages    = iommu_v2_map_pages;
	pgtable->iop.ops.unmap_pages  = iommu_v2_unmap_pages;
	pgtable->iop.ops.iova_to_phys = iommu_v2_iova_to_phys;

	cfg->pgsize_bitmap = AMD_IOMMU_PGSIZES_V2;
	cfg->ias = IOMMU_IN_ADDR_BIT_SIZE;
	cfg->oas = IOMMU_OUT_ADDR_BIT_SIZE;
	cfg->tlb = &v2_flush_ops;

	return &pgtable->iop;

err_free_pgd:
	free_pgtable_page(pgtable->pgd);

	return NULL;
}

struct io_pgtable_init_fns io_pgtable_amd_iommu_v2_init_fns = {
	.alloc	= v2_alloc_pgtable,
	.free	= v2_free_pgtable,
};