// SPDX-License-Identifier: GPL-2.0
/*  Copyright(c) 2016-20 Intel Corporation. */

#include <linux/lockdep.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/shmem_fs.h>
#include <linux/suspend.h>
#include <linux/sched/mm.h>
#include <asm/sgx.h>
#include "encl.h"
#include "encls.h"
#include "sgx.h"

static int sgx_encl_lookup_backing(struct sgx_encl *encl, unsigned long page_index,
			    struct sgx_backing *backing);

#define PCMDS_PER_PAGE (PAGE_SIZE / sizeof(struct sgx_pcmd))
/*
 * 32 PCMD entries share a PCMD page. PCMD_FIRST_MASK is used to
 * determine the page index associated with the first PCMD entry
 * within a PCMD page.
 */
#define PCMD_FIRST_MASK GENMASK(4, 0)

/**
 * reclaimer_writing_to_pcmd() - Query if any enclave page associated with
 *                               a PCMD page is in the process of being reclaimed.
 * @encl:        Enclave to which PCMD page belongs
 * @start_addr:  Address of enclave page using first entry within the PCMD page
 *
 * When an enclave page is reclaimed some Paging Crypto MetaData (PCMD) is
 * stored. The PCMD data of a reclaimed enclave page contains enough
 * information for the processor to verify the page at the time
 * it is loaded back into the Enclave Page Cache (EPC).
 *
 * The backing storage to which enclave pages are reclaimed is laid out as
 * follows:
 * Encrypted enclave pages:SECS page:PCMD pages
 *
 * Each PCMD page contains the PCMD metadata of
 * PAGE_SIZE/sizeof(struct sgx_pcmd) enclave pages.
 *
 * A PCMD page can only be truncated if it is (a) empty, and (b) not in the
 * process of getting data (and thus soon being non-empty). (b) is tested by
 * checking whether an enclave page sharing the PCMD page is in the process
 * of being reclaimed.
 *
 * The reclaimer sets the SGX_ENCL_PAGE_BEING_RECLAIMED flag when it
 * intends to reclaim that enclave page - it means that the PCMD page
 * associated with that enclave page is about to get some data and thus
 * even if the PCMD page is empty, it should not be truncated.
 *
 * Context: Enclave mutex (&sgx_encl->lock) must be held.
 * Return: 1 if the reclaimer is about to write to the PCMD page
 *         0 if the reclaimer has no intention to write to the PCMD page
 */
static int reclaimer_writing_to_pcmd(struct sgx_encl *encl,
				     unsigned long start_addr)
{
	int reclaimed = 0;
	int i;

	/*
	 * PCMD_FIRST_MASK is based on the number of PCMD entries within a
	 * PCMD page being 32.
	 */
	BUILD_BUG_ON(PCMDS_PER_PAGE != 32);

	for (i = 0; i < PCMDS_PER_PAGE; i++) {
		struct sgx_encl_page *entry;
		unsigned long addr;

		addr = start_addr + i * PAGE_SIZE;

		/*
		 * Stop when reaching the SECS page - it does not
		 * have a page_array entry and its reclaim is
		 * started and completed with the enclave mutex held so
		 * it does not use the SGX_ENCL_PAGE_BEING_RECLAIMED
		 * flag.
		 */
		if (addr == encl->base + encl->size)
			break;

		entry = xa_load(&encl->page_array, PFN_DOWN(addr));
		if (!entry)
			continue;

		/*
		 * VA page slot ID uses the same bit as the flag so it is
		 * important to ensure that the page is not already in the
		 * backing store.
		 */
		if (entry->epc_page &&
		    (entry->desc & SGX_ENCL_PAGE_BEING_RECLAIMED)) {
			reclaimed = 1;
			break;
		}
	}

	return reclaimed;
}

/*
 * Calculate byte offset of a PCMD struct associated with an enclave page. PCMDs
 * follow right after the EPC data in the backing storage. In addition to the
 * visible enclave pages, there's one extra page slot for SECS, before PCMD
 * structs.
 */
static inline pgoff_t sgx_encl_get_backing_page_pcmd_offset(struct sgx_encl *encl,
							    unsigned long page_index)
{
	pgoff_t epc_end_off = encl->size + sizeof(struct sgx_secs);

	return epc_end_off + page_index * sizeof(struct sgx_pcmd);
}

/*
 * Free a page from the backing storage at the given page index.
 */
static inline void sgx_encl_truncate_backing_page(struct sgx_encl *encl, unsigned long page_index)
{
	struct inode *inode = file_inode(encl->backing);

	shmem_truncate_range(inode, PFN_PHYS(page_index), PFN_PHYS(page_index) + PAGE_SIZE - 1);
}

/*
 * ELDU: Load an EPC page as unblocked. For more info, see "OS Management of EPC
 * Pages" in the SDM.
 */
static int __sgx_encl_eldu(struct sgx_encl_page *encl_page,
			   struct sgx_epc_page *epc_page,
			   struct sgx_epc_page *secs_page)
{
	unsigned long va_offset = encl_page->desc & SGX_ENCL_PAGE_VA_OFFSET_MASK;
	struct sgx_encl *encl = encl_page->encl;
	pgoff_t page_index, page_pcmd_off;
	unsigned long pcmd_first_page;
	struct sgx_pageinfo pginfo;
	struct sgx_backing b;
	bool pcmd_page_empty;
	u8 *pcmd_page;
	int ret;

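	/*
	 * The SECS page itself is loaded when @secs_page is NULL. It is
	 * stored in the backing slot right after the last enclave page,
	 * i.e. at page index PFN_DOWN(encl->size).
	 */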
	if (secs_page)
		page_index = PFN_DOWN(encl_page->desc - encl_page->encl->base);
	else
		page_index = PFN_DOWN(encl->size);

	/*
	 * Address of enclave page using the first entry within the PCMD page.
	 */
	pcmd_first_page = PFN_PHYS(page_index & ~PCMD_FIRST_MASK) + encl->base;

	page_pcmd_off = sgx_encl_get_backing_page_pcmd_offset(encl, page_index);

	ret = sgx_encl_lookup_backing(encl, page_index, &b);
	if (ret)
		return ret;

	pginfo.addr = encl_page->desc & PAGE_MASK;
	pginfo.contents = (unsigned long)kmap_atomic(b.contents);
	pcmd_page = kmap_atomic(b.pcmd);
	pginfo.metadata = (unsigned long)pcmd_page + b.pcmd_offset;

	if (secs_page)
		pginfo.secs = (u64)sgx_get_epc_virt_addr(secs_page);
	else
		pginfo.secs = 0;

	ret = __eldu(&pginfo, sgx_get_epc_virt_addr(epc_page),
		     sgx_get_epc_virt_addr(encl_page->va_page->epc_page) + va_offset);
	if (ret) {
		if (encls_failed(ret))
			ENCLS_WARN(ret, "ELDU");

		ret = -EFAULT;
	}

	memset(pcmd_page + b.pcmd_offset, 0, sizeof(struct sgx_pcmd));
	set_page_dirty(b.pcmd);

	/*
	 * The area for the PCMD in the page was zeroed above.  Check if the
	 * whole page is now empty meaning that all PCMDs have been zeroed:
	 */
	pcmd_page_empty = !memchr_inv(pcmd_page, 0, PAGE_SIZE);

	kunmap_atomic(pcmd_page);
	kunmap_atomic((void *)(unsigned long)pginfo.contents);

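	/*
	 * Take an extra reference on the PCMD page so that it can still be
	 * inspected below after the backing is released.
	 */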
	get_page(b.pcmd);
	sgx_encl_put_backing(&b);

	sgx_encl_truncate_backing_page(encl, page_index);

	if (pcmd_page_empty && !reclaimer_writing_to_pcmd(encl, pcmd_first_page)) {
		sgx_encl_truncate_backing_page(encl, PFN_DOWN(page_pcmd_off));
		pcmd_page = kmap_atomic(b.pcmd);
		if (memchr_inv(pcmd_page, 0, PAGE_SIZE))
			pr_warn("PCMD page not empty after truncate.\n");
		kunmap_atomic(pcmd_page);
	}

	put_page(b.pcmd);

	return ret;
}

static struct sgx_epc_page *sgx_encl_eldu(struct sgx_encl_page *encl_page,
					  struct sgx_epc_page *secs_page)
{
	unsigned long va_offset = encl_page->desc & SGX_ENCL_PAGE_VA_OFFSET_MASK;
	struct sgx_encl *encl = encl_page->encl;
	struct sgx_epc_page *epc_page;
	int ret;

	epc_page = sgx_alloc_epc_page(encl_page, false);
	if (IS_ERR(epc_page))
		return epc_page;

	ret = __sgx_encl_eldu(encl_page, epc_page, secs_page);
	if (ret) {
		sgx_encl_free_epc_page(epc_page);
		return ERR_PTR(ret);
	}

	sgx_free_va_slot(encl_page->va_page, va_offset);
	list_move(&encl_page->va_page->list, &encl->va_pages);
	encl_page->desc &= ~SGX_ENCL_PAGE_VA_OFFSET_MASK;
	encl_page->epc_page = epc_page;

	return epc_page;
}

static struct sgx_encl_page *__sgx_encl_load_page(struct sgx_encl *encl,
						  struct sgx_encl_page *entry)
{
	struct sgx_epc_page *epc_page;

	/* Entry successfully located. */
	if (entry->epc_page) {
		if (entry->desc & SGX_ENCL_PAGE_BEING_RECLAIMED)
			return ERR_PTR(-EBUSY);

		return entry;
	}

	if (!(encl->secs.epc_page)) {
		epc_page = sgx_encl_eldu(&encl->secs, NULL);
		if (IS_ERR(epc_page))
			return ERR_CAST(epc_page);
	}

	epc_page = sgx_encl_eldu(entry, encl->secs.epc_page);
	if (IS_ERR(epc_page))
		return ERR_CAST(epc_page);

	encl->secs_child_cnt++;
	sgx_mark_page_reclaimable(entry->epc_page);

	return entry;
}

static struct sgx_encl_page *sgx_encl_load_page_in_vma(struct sgx_encl *encl,
						       unsigned long addr,
						       unsigned long vm_flags)
{
	unsigned long vm_prot_bits = vm_flags & (VM_READ | VM_WRITE | VM_EXEC);
	struct sgx_encl_page *entry;

	entry = xa_load(&encl->page_array, PFN_DOWN(addr));
	if (!entry)
		return ERR_PTR(-EFAULT);

	/*
	 * Verify that the page has equal or higher build time
	 * permissions than the VMA permissions (i.e. the subset of {VM_READ,
	 * VM_WRITE, VM_EXEC} in vma->vm_flags).
	 */
	if ((entry->vm_max_prot_bits & vm_prot_bits) != vm_prot_bits)
		return ERR_PTR(-EFAULT);

	return __sgx_encl_load_page(encl, entry);
}

struct sgx_encl_page *sgx_encl_load_page(struct sgx_encl *encl,
					 unsigned long addr)
{
	struct sgx_encl_page *entry;

	entry = xa_load(&encl->page_array, PFN_DOWN(addr));
	if (!entry)
		return ERR_PTR(-EFAULT);

	return __sgx_encl_load_page(encl, entry);
}

/**
 * sgx_encl_eaug_page() - Dynamically add page to initialized enclave
 * @vma:	VMA obtained from fault info from where page is accessed
 * @encl:	enclave accessing the page
 * @addr:	address that triggered the page fault
 *
 * When an initialized enclave accesses a page with no backing EPC page
 * on an SGX2 system, the EPC page can be added dynamically via the SGX2
 * ENCLS[EAUG] instruction.
 *
 * Returns: Appropriate vm_fault_t: VM_FAULT_NOPAGE when PTE was installed
 * successfully, VM_FAULT_SIGBUS or VM_FAULT_OOM as error otherwise.
 */
static vm_fault_t sgx_encl_eaug_page(struct vm_area_struct *vma,
				     struct sgx_encl *encl, unsigned long addr)
{
	vm_fault_t vmret = VM_FAULT_SIGBUS;
	struct sgx_pageinfo pginfo = {0};
	struct sgx_encl_page *encl_page;
	struct sgx_epc_page *epc_page;
	struct sgx_va_page *va_page;
	unsigned long phys_addr;
	u64 secinfo_flags;
	int ret;

	if (!test_bit(SGX_ENCL_INITIALIZED, &encl->flags))
		return VM_FAULT_SIGBUS;

	/*
	 * Ignore internal permission checking for dynamically added pages.
	 * They matter only for data added during the pre-initialization
	 * phase. The enclave decides the permissions by the means of
	 * EACCEPT, EACCEPTCOPY and EMODPE.
	 */
	secinfo_flags = SGX_SECINFO_R | SGX_SECINFO_W | SGX_SECINFO_X;
	encl_page = sgx_encl_page_alloc(encl, addr - encl->base, secinfo_flags);
	if (IS_ERR(encl_page))
		return VM_FAULT_OOM;

	mutex_lock(&encl->lock);

	epc_page = sgx_alloc_epc_page(encl_page, false);
	if (IS_ERR(epc_page)) {
		if (PTR_ERR(epc_page) == -EBUSY)
			vmret = VM_FAULT_NOPAGE;
		goto err_out_unlock;
	}

	va_page = sgx_encl_grow(encl, false);
	if (IS_ERR(va_page)) {
		if (PTR_ERR(va_page) == -EBUSY)
			vmret = VM_FAULT_NOPAGE;
		goto err_out_epc;
	}

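	/*
	 * sgx_encl_grow() returns NULL when an existing VA page still has a
	 * free slot; only a newly allocated VA page needs to be added to the
	 * list.
	 */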
	if (va_page)
		list_add(&va_page->list, &encl->va_pages);

	ret = xa_insert(&encl->page_array, PFN_DOWN(encl_page->desc),
			encl_page, GFP_KERNEL);
	/*
	 * If ret == -EBUSY then the page was created in another flow while
	 * running without encl->lock.
	 */
	if (ret)
		goto err_out_shrink;

	pginfo.secs = (unsigned long)sgx_get_epc_virt_addr(encl->secs.epc_page);
	pginfo.addr = encl_page->desc & PAGE_MASK;
	pginfo.metadata = 0;

	ret = __eaug(&pginfo, sgx_get_epc_virt_addr(epc_page));
	if (ret)
		goto err_out;

	encl_page->encl = encl;
	encl_page->epc_page = epc_page;
	encl_page->type = SGX_PAGE_TYPE_REG;
	encl->secs_child_cnt++;

	sgx_mark_page_reclaimable(encl_page->epc_page);

	phys_addr = sgx_get_epc_phys_addr(epc_page);
	/*
	 * Do not undo everything when creating the PTE entry fails - the next
	 * #PF would find the page ready for a PTE.
	 */
	vmret = vmf_insert_pfn(vma, addr, PFN_DOWN(phys_addr));
	if (vmret != VM_FAULT_NOPAGE) {
		mutex_unlock(&encl->lock);
		return VM_FAULT_SIGBUS;
	}
	mutex_unlock(&encl->lock);
	return VM_FAULT_NOPAGE;

err_out:
	xa_erase(&encl->page_array, PFN_DOWN(encl_page->desc));

err_out_shrink:
	sgx_encl_shrink(encl, va_page);
err_out_epc:
	sgx_encl_free_epc_page(epc_page);
err_out_unlock:
	mutex_unlock(&encl->lock);
	kfree(encl_page);

	return vmret;
}

static vm_fault_t sgx_vma_fault(struct vm_fault *vmf)
{
	unsigned long addr = (unsigned long)vmf->address;
	struct vm_area_struct *vma = vmf->vma;
	struct sgx_encl_page *entry;
	unsigned long phys_addr;
	struct sgx_encl *encl;
	vm_fault_t ret;

	encl = vma->vm_private_data;

	/*
	 * It's very unlikely but possible that allocating memory for the
	 * mm_list entry of a forked process failed in sgx_vma_open(). When
	 * this happens, vm_private_data is set to NULL.
	 */
	if (unlikely(!encl))
		return VM_FAULT_SIGBUS;

	/*
	 * The page_array keeps track of all enclave pages, whether they
	 * are swapped out or not. If there is no entry for this page and
	 * the system supports SGX2 then it is possible to dynamically add
	 * a new enclave page. This is only possible for an initialized
	 * enclave, which is checked for right away.
	 */
	if (cpu_feature_enabled(X86_FEATURE_SGX2) &&
	    (!xa_load(&encl->page_array, PFN_DOWN(addr))))
		return sgx_encl_eaug_page(vma, encl, addr);

	mutex_lock(&encl->lock);

	entry = sgx_encl_load_page_in_vma(encl, addr, vma->vm_flags);
	if (IS_ERR(entry)) {
		mutex_unlock(&encl->lock);

		if (PTR_ERR(entry) == -EBUSY)
			return VM_FAULT_NOPAGE;

		return VM_FAULT_SIGBUS;
	}

	phys_addr = sgx_get_epc_phys_addr(entry->epc_page);

	ret = vmf_insert_pfn(vma, addr, PFN_DOWN(phys_addr));
	if (ret != VM_FAULT_NOPAGE) {
		mutex_unlock(&encl->lock);

		return VM_FAULT_SIGBUS;
	}

	sgx_encl_test_and_clear_young(vma->vm_mm, entry);
	mutex_unlock(&encl->lock);

	return VM_FAULT_NOPAGE;
}

static void sgx_vma_open(struct vm_area_struct *vma)
{
	struct sgx_encl *encl = vma->vm_private_data;

	/*
	 * It's possible but unlikely that vm_private_data is NULL. This can
	 * happen in a grandchild of a process, when sgx_encl_mm_add() failed
	 * to allocate memory in this callback.
	 */
	if (unlikely(!encl))
		return;

	if (sgx_encl_mm_add(encl, vma->vm_mm))
		vma->vm_private_data = NULL;
}


/**
 * sgx_encl_may_map() - Check if a requested VMA mapping is allowed
 * @encl:		an enclave pointer
 * @start:		lower bound of the address range, inclusive
 * @end:		upper bound of the address range, exclusive
 * @vm_flags:		VMA flags
 *
 * Iterate through the enclave pages contained within [@start, @end) to verify
 * that the permissions requested by a subset of {VM_READ, VM_WRITE, VM_EXEC}
 * do not contain any permissions that are not contained in the build time
 * permissions of any of the enclave pages within the given address range.
 *
 * An enclave creator must declare the strongest permissions that will be
 * needed for each enclave page. This ensures that mappings have identical or
 * weaker permissions than those declared earlier.
 *
 * Return: 0 on success, -EACCES otherwise
 */
int sgx_encl_may_map(struct sgx_encl *encl, unsigned long start,
		     unsigned long end, unsigned long vm_flags)
{
	unsigned long vm_prot_bits = vm_flags & (VM_READ | VM_WRITE | VM_EXEC);
	struct sgx_encl_page *page;
	unsigned long count = 0;
	int ret = 0;

	XA_STATE(xas, &encl->page_array, PFN_DOWN(start));

	/* Disallow mapping outside enclave's address range. */
	if (test_bit(SGX_ENCL_INITIALIZED, &encl->flags) &&
	    (start < encl->base || end > encl->base + encl->size))
		return -EACCES;

	/*
	 * Disallow READ_IMPLIES_EXEC tasks as their VMA permissions might
	 * conflict with the enclave page permissions.
	 */
	if (current->personality & READ_IMPLIES_EXEC)
		return -EACCES;

	mutex_lock(&encl->lock);
	xas_lock(&xas);
	xas_for_each(&xas, page, PFN_DOWN(end - 1)) {
		if (~page->vm_max_prot_bits & vm_prot_bits) {
			ret = -EACCES;
			break;
		}

		/* Reschedule on every XA_CHECK_SCHED iteration. */
		if (!(++count % XA_CHECK_SCHED)) {
			xas_pause(&xas);
			xas_unlock(&xas);
			mutex_unlock(&encl->lock);

			cond_resched();

			mutex_lock(&encl->lock);
			xas_lock(&xas);
		}
	}
	xas_unlock(&xas);
	mutex_unlock(&encl->lock);

	return ret;
}

static int sgx_vma_mprotect(struct vm_area_struct *vma, unsigned long start,
			    unsigned long end, unsigned long newflags)
{
	return sgx_encl_may_map(vma->vm_private_data, start, end, newflags);
}

static int sgx_encl_debug_read(struct sgx_encl *encl, struct sgx_encl_page *page,
			       unsigned long addr, void *data)
{
	unsigned long offset = addr & ~PAGE_MASK;
	int ret;

	ret = __edbgrd(sgx_get_epc_virt_addr(page->epc_page) + offset, data);
	if (ret)
		return -EIO;

	return 0;
}

static int sgx_encl_debug_write(struct sgx_encl *encl, struct sgx_encl_page *page,
				unsigned long addr, void *data)
{
	unsigned long offset = addr & ~PAGE_MASK;
	int ret;

	ret = __edbgwr(sgx_get_epc_virt_addr(page->epc_page) + offset, data);
	if (ret)
		return -EIO;

	return 0;
}

/*
 * Load an enclave page to EPC if required, and take encl->lock.
 */
static struct sgx_encl_page *sgx_encl_reserve_page(struct sgx_encl *encl,
						   unsigned long addr,
						   unsigned long vm_flags)
{
	struct sgx_encl_page *entry;

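	/*
	 * -EBUSY means the page is being reclaimed; drop the lock and retry
	 * until the reclaimer has finished with it.
	 */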
	for ( ; ; ) {
		mutex_lock(&encl->lock);

		entry = sgx_encl_load_page_in_vma(encl, addr, vm_flags);
		if (PTR_ERR(entry) != -EBUSY)
			break;

		mutex_unlock(&encl->lock);
	}

	if (IS_ERR(entry))
		mutex_unlock(&encl->lock);

	return entry;
}

static int sgx_vma_access(struct vm_area_struct *vma, unsigned long addr,
			  void *buf, int len, int write)
{
	struct sgx_encl *encl = vma->vm_private_data;
	struct sgx_encl_page *entry = NULL;
	char data[sizeof(unsigned long)];
	unsigned long align;
	int offset;
	int cnt;
	int ret = 0;
	int i;

	/*
	 * If the process was forked, the VMA is still there but
	 * vm_private_data is set to NULL.
	 */
	if (!encl)
		return -EFAULT;

	if (!test_bit(SGX_ENCL_DEBUG, &encl->flags))
		return -EFAULT;

	for (i = 0; i < len; i += cnt) {
		entry = sgx_encl_reserve_page(encl, (addr + i) & PAGE_MASK,
					      vma->vm_flags);
		if (IS_ERR(entry)) {
			ret = PTR_ERR(entry);
			break;
		}

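		/*
		 * EDBGRD and EDBGWR move naturally aligned 8-byte quantities,
		 * so access the enclave one word at a time.
		 */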
		align = ALIGN_DOWN(addr + i, sizeof(unsigned long));
		offset = (addr + i) & (sizeof(unsigned long) - 1);
		cnt = sizeof(unsigned long) - offset;
		cnt = min(cnt, len - i);

		ret = sgx_encl_debug_read(encl, entry, align, data);
		if (ret)
			goto out;

		if (write) {
			memcpy(data + offset, buf + i, cnt);
			ret = sgx_encl_debug_write(encl, entry, align, data);
			if (ret)
				goto out;
		} else {
			memcpy(buf + i, data + offset, cnt);
		}

out:
		mutex_unlock(&encl->lock);

		if (ret)
			break;
	}

	return ret < 0 ? ret : i;
}

const struct vm_operations_struct sgx_vm_ops = {
	.fault = sgx_vma_fault,
	.mprotect = sgx_vma_mprotect,
	.open = sgx_vma_open,
	.access = sgx_vma_access,
};

/**
 * sgx_encl_release - Destroy an enclave instance
 * @ref:	address of a kref inside &sgx_encl
 *
 * Used together with kref_put(). Frees all the resources associated with the
 * enclave and the instance itself.
 */
void sgx_encl_release(struct kref *ref)
{
	struct sgx_encl *encl = container_of(ref, struct sgx_encl, refcount);
	unsigned long max_page_index = PFN_DOWN(encl->base + encl->size - 1);
	struct sgx_va_page *va_page;
	struct sgx_encl_page *entry;
	unsigned long count = 0;

	XA_STATE(xas, &encl->page_array, PFN_DOWN(encl->base));

	xas_lock(&xas);
	xas_for_each(&xas, entry, max_page_index) {
		if (entry->epc_page) {
			/*
			 * The page and its radix tree entry cannot be freed
			 * if the page is being held by the reclaimer.
			 */
			if (sgx_unmark_page_reclaimable(entry->epc_page))
				continue;

			sgx_encl_free_epc_page(entry->epc_page);
			encl->secs_child_cnt--;
			entry->epc_page = NULL;
		}

		kfree(entry);
		/*
		 * Invoke scheduler on every XA_CHECK_SCHED iteration
		 * to prevent soft lockups.
		 */
		if (!(++count % XA_CHECK_SCHED)) {
			xas_pause(&xas);
			xas_unlock(&xas);

			cond_resched();

			xas_lock(&xas);
		}
	}
	xas_unlock(&xas);

	xa_destroy(&encl->page_array);

	if (!encl->secs_child_cnt && encl->secs.epc_page) {
		sgx_encl_free_epc_page(encl->secs.epc_page);
		encl->secs.epc_page = NULL;
	}

	while (!list_empty(&encl->va_pages)) {
		va_page = list_first_entry(&encl->va_pages, struct sgx_va_page,
					   list);
		list_del(&va_page->list);
		sgx_encl_free_epc_page(va_page->epc_page);
		kfree(va_page);
	}

	if (encl->backing)
		fput(encl->backing);

	cleanup_srcu_struct(&encl->srcu);

	WARN_ON_ONCE(!list_empty(&encl->mm_list));

	/* Detect EPC page leaks. */
	WARN_ON_ONCE(encl->secs_child_cnt);
	WARN_ON_ONCE(encl->secs.epc_page);

	kfree(encl);
}

/*
 * 'mm' is exiting and no longer needs mmu notifications.
 */
static void sgx_mmu_notifier_release(struct mmu_notifier *mn,
				     struct mm_struct *mm)
{
	struct sgx_encl_mm *encl_mm = container_of(mn, struct sgx_encl_mm, mmu_notifier);
	struct sgx_encl_mm *tmp = NULL;

	/*
	 * The enclave itself can remove encl_mm.  Note, objects can't be moved
	 * off an RCU protected list, but deletion is ok.
	 */
	spin_lock(&encl_mm->encl->mm_lock);
	list_for_each_entry(tmp, &encl_mm->encl->mm_list, list) {
		if (tmp == encl_mm) {
			list_del_rcu(&encl_mm->list);
			break;
		}
	}
	spin_unlock(&encl_mm->encl->mm_lock);

	if (tmp == encl_mm) {
		synchronize_srcu(&encl_mm->encl->srcu);
		mmu_notifier_put(mn);
	}
}

static void sgx_mmu_notifier_free(struct mmu_notifier *mn)
{
	struct sgx_encl_mm *encl_mm = container_of(mn, struct sgx_encl_mm, mmu_notifier);

	/* 'encl_mm' is going away, put encl_mm->encl reference: */
	kref_put(&encl_mm->encl->refcount, sgx_encl_release);

	kfree(encl_mm);
}

static const struct mmu_notifier_ops sgx_mmu_notifier_ops = {
	.release		= sgx_mmu_notifier_release,
	.free_notifier		= sgx_mmu_notifier_free,
};

static struct sgx_encl_mm *sgx_encl_find_mm(struct sgx_encl *encl,
					    struct mm_struct *mm)
{
	struct sgx_encl_mm *encl_mm = NULL;
	struct sgx_encl_mm *tmp;
	int idx;

	idx = srcu_read_lock(&encl->srcu);

	list_for_each_entry_rcu(tmp, &encl->mm_list, list) {
		if (tmp->mm == mm) {
			encl_mm = tmp;
			break;
		}
	}

	srcu_read_unlock(&encl->srcu, idx);

	return encl_mm;
}

int sgx_encl_mm_add(struct sgx_encl *encl, struct mm_struct *mm)
{
	struct sgx_encl_mm *encl_mm;
	int ret;

	/*
	 * Even though a single enclave may be mapped into an mm more than once,
	 * each 'mm' only appears once on encl->mm_list. This is guaranteed by
	 * holding the mm's mmap lock for write before an mm can be added to or
	 * removed from an encl->mm_list.
	 */
	mmap_assert_write_locked(mm);

	/*
	 * It's possible that an entry already exists in the mm_list, because it
	 * is removed only on VFS release or process exit.
	 */
	if (sgx_encl_find_mm(encl, mm))
		return 0;

	encl_mm = kzalloc(sizeof(*encl_mm), GFP_KERNEL);
	if (!encl_mm)
		return -ENOMEM;

	/* Grab a refcount for the encl_mm->encl reference: */
	kref_get(&encl->refcount);
	encl_mm->encl = encl;
	encl_mm->mm = mm;
	encl_mm->mmu_notifier.ops = &sgx_mmu_notifier_ops;

	ret = __mmu_notifier_register(&encl_mm->mmu_notifier, mm);
	if (ret) {
		kfree(encl_mm);
		return ret;
	}

	spin_lock(&encl->mm_lock);
	list_add_rcu(&encl_mm->list, &encl->mm_list);
	/* Pairs with smp_rmb() in sgx_zap_enclave_ptes(). */
	smp_wmb();
	encl->mm_list_version++;
	spin_unlock(&encl->mm_lock);

	return 0;
}

/**
 * sgx_encl_cpumask() - Query which CPUs might be accessing the enclave
 * @encl: the enclave
 *
 * Some SGX functions require that no cached linear-to-physical address
 * mappings are present before they can succeed. For example, ENCLS[EWB]
 * copies a page from the enclave page cache to regular main memory but
 * it fails if it cannot ensure that there are no cached
 * linear-to-physical address mappings referring to the page.
 *
 * SGX hardware flushes all cached linear-to-physical mappings on a CPU
 * when an enclave is exited via ENCLU[EEXIT] or an Asynchronous Enclave
 * Exit (AEX). Exiting an enclave will thus ensure cached linear-to-physical
 * address mappings are cleared but coordination with the tracking done within
 * the SGX hardware is needed to support the SGX functions that depend on this
 * cache clearing.
 *
 * When the ENCLS[ETRACK] function is issued on an enclave the hardware
 * tracks threads operating inside the enclave at that time. The SGX
 * hardware tracking requires that all the identified threads have exited
 * the enclave in order to flush the mappings before a function such as
 * ENCLS[EWB] will be permitted.
 *
 * The following flow is used to support SGX functions that require that
 * no cached linear-to-physical address mappings are present:
 * 1) Execute ENCLS[ETRACK] to initiate hardware tracking.
 * 2) Use this function (sgx_encl_cpumask()) to query which CPUs might be
 *    accessing the enclave.
 * 3) Send IPI to identified CPUs, kicking them out of the enclave and
 *    thus flushing all locally cached linear-to-physical address mappings.
 * 4) Execute SGX function.
 *
 * Context: It is required to call this function after ENCLS[ETRACK].
 *          This will ensure that if any new mm appears (racing with
 *          sgx_encl_mm_add()) then the new mm will enter into the
 *          enclave with fresh linear-to-physical address mappings.
 *
 *          It is required that all IPIs are completed before a new
 *          ENCLS[ETRACK] is issued so be sure to protect steps 1 to 3
 *          of the above flow with the enclave's mutex.
 *
 * Return: cpumask of CPUs that might be accessing @encl
 */
const cpumask_t *sgx_encl_cpumask(struct sgx_encl *encl)
{
	cpumask_t *cpumask = &encl->cpumask;
	struct sgx_encl_mm *encl_mm;
	int idx;

	cpumask_clear(cpumask);

	idx = srcu_read_lock(&encl->srcu);

	list_for_each_entry_rcu(encl_mm, &encl->mm_list, list) {
		if (!mmget_not_zero(encl_mm->mm))
			continue;

		cpumask_or(cpumask, cpumask, mm_cpumask(encl_mm->mm));

		mmput_async(encl_mm->mm);
	}

	srcu_read_unlock(&encl->srcu, idx);

	return cpumask;
}

static struct page *sgx_encl_get_backing_page(struct sgx_encl *encl,
					      pgoff_t index)
{
	struct address_space *mapping = encl->backing->f_mapping;
	gfp_t gfpmask = mapping_gfp_mask(mapping);

	return shmem_read_mapping_page_gfp(mapping, index, gfpmask);
}

/**
 * __sgx_encl_get_backing() - Pin the backing storage
 * @encl:	an enclave pointer
 * @page_index:	enclave page index
 * @backing:	data for accessing backing storage for the page
 *
 * Pin the backing storage pages for storing the encrypted contents and Paging
 * Crypto MetaData (PCMD) of an enclave page.
 *
 * Return:
 *   0 on success,
 *   -errno otherwise.
 */
static int __sgx_encl_get_backing(struct sgx_encl *encl, unsigned long page_index,
			 struct sgx_backing *backing)
{
	pgoff_t page_pcmd_off = sgx_encl_get_backing_page_pcmd_offset(encl, page_index);
	struct page *contents;
	struct page *pcmd;

	contents = sgx_encl_get_backing_page(encl, page_index);
	if (IS_ERR(contents))
		return PTR_ERR(contents);

	pcmd = sgx_encl_get_backing_page(encl, PFN_DOWN(page_pcmd_off));
	if (IS_ERR(pcmd)) {
		put_page(contents);
		return PTR_ERR(pcmd);
	}

	backing->contents = contents;
	backing->pcmd = pcmd;
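	/* Byte offset of the page's PCMD struct within the pinned PCMD page. */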
	backing->pcmd_offset = page_pcmd_off & (PAGE_SIZE - 1);

	return 0;
}

/*
 * When called from ksgxd, returns the mem_cgroup of a struct mm stored
 * in the enclave's mm_list. When not called from ksgxd, just returns
 * the mem_cgroup of the current task.
 */
static struct mem_cgroup *sgx_encl_get_mem_cgroup(struct sgx_encl *encl)
{
	struct mem_cgroup *memcg = NULL;
	struct sgx_encl_mm *encl_mm;
	int idx;

	/*
	 * If called from normal task context, return the mem_cgroup
	 * of the current task's mm. The remainder of the handling is for
	 * ksgxd.
	 */
	if (!current_is_ksgxd())
		return get_mem_cgroup_from_mm(current->mm);

	/*
	 * Search the enclave's mm_list to find an mm associated with
	 * this enclave to charge the allocation to.
	 */
	idx = srcu_read_lock(&encl->srcu);

	list_for_each_entry_rcu(encl_mm, &encl->mm_list, list) {
		if (!mmget_not_zero(encl_mm->mm))
			continue;

		memcg = get_mem_cgroup_from_mm(encl_mm->mm);

		mmput_async(encl_mm->mm);

		break;
	}

	srcu_read_unlock(&encl->srcu, idx);

	/*
	 * In the rare case that there isn't an mm associated with
	 * the enclave, set memcg to the current active mem_cgroup.
	 * This will be the root mem_cgroup if there is no active
	 * mem_cgroup.
	 */
	if (!memcg)
		return get_mem_cgroup_from_mm(NULL);

	return memcg;
}

/**
 * sgx_encl_alloc_backing() - create a new backing storage page
 * @encl:	an enclave pointer
 * @page_index:	enclave page index
 * @backing:	data for accessing backing storage for the page
 *
 * When called from ksgxd, sets the active memcg from one of the
 * mms in the enclave's mm_list prior to any backing page allocation,
 * in order to ensure that shmem page allocations are charged to the
 * enclave.  Create a backing page for loading data back into an EPC page with
 * ELDU.  This function takes a reference on a new backing page which
 * must be dropped with a corresponding call to sgx_encl_put_backing().
 *
 * Return:
 *   0 on success,
 *   -errno otherwise.
 */
int sgx_encl_alloc_backing(struct sgx_encl *encl, unsigned long page_index,
			   struct sgx_backing *backing)
{
	struct mem_cgroup *encl_memcg = sgx_encl_get_mem_cgroup(encl);
	struct mem_cgroup *memcg = set_active_memcg(encl_memcg);
	int ret;

	ret = __sgx_encl_get_backing(encl, page_index, backing);

	set_active_memcg(memcg);
	mem_cgroup_put(encl_memcg);

	return ret;
}

/**
 * sgx_encl_lookup_backing() - retrieve an existing backing storage page
 * @encl:	an enclave pointer
 * @page_index:	enclave page index
 * @backing:	data for accessing backing storage for the page
 *
 * Retrieve a backing page for loading data back into an EPC page with ELDU.
 * It is the caller's responsibility to ensure that it is appropriate to use
 * sgx_encl_lookup_backing() rather than sgx_encl_alloc_backing(). If lookup is
 * not used correctly, this will cause an allocation which is not accounted for.
 * This function takes a reference on an existing backing page which must be
 * dropped with a corresponding call to sgx_encl_put_backing().
 *
 * Return:
 *   0 on success,
 *   -errno otherwise.
 */
static int sgx_encl_lookup_backing(struct sgx_encl *encl, unsigned long page_index,
			   struct sgx_backing *backing)
{
	return __sgx_encl_get_backing(encl, page_index, backing);
}

/**
 * sgx_encl_put_backing() - Unpin the backing storage
 * @backing:	data for accessing backing storage for the page
 */
void sgx_encl_put_backing(struct sgx_backing *backing)
{
	put_page(backing->pcmd);
	put_page(backing->contents);
}

static int sgx_encl_test_and_clear_young_cb(pte_t *ptep, unsigned long addr,
					    void *data)
{
	pte_t pte;
	int ret;

	ret = pte_young(*ptep);
	if (ret) {
		pte = pte_mkold(*ptep);
		set_pte_at((struct mm_struct *)data, addr, ptep, pte);
	}

	return ret;
}

/**
 * sgx_encl_test_and_clear_young() - Test and reset the accessed bit
 * @mm:		mm_struct that is checked
 * @page:	enclave page to be tested for recent access
 *
 * Checks the Access (A) bit from the PTE corresponding to the enclave page and
 * clears it.
 *
 * Return: 1 if the page has been recently accessed and 0 if not.
 */
int sgx_encl_test_and_clear_young(struct mm_struct *mm,
				  struct sgx_encl_page *page)
{
	unsigned long addr = page->desc & PAGE_MASK;
	struct sgx_encl *encl = page->encl;
	struct vm_area_struct *vma;
	int ret;

	ret = sgx_encl_find(mm, addr, &vma);
	if (ret)
		return 0;

	if (encl != vma->vm_private_data)
		return 0;

	ret = apply_to_page_range(vma->vm_mm, addr, PAGE_SIZE,
				  sgx_encl_test_and_clear_young_cb, vma->vm_mm);
	if (ret < 0)
		return 0;

	return ret;
}

struct sgx_encl_page *sgx_encl_page_alloc(struct sgx_encl *encl,
					  unsigned long offset,
					  u64 secinfo_flags)
{
	struct sgx_encl_page *encl_page;
	unsigned long prot;

	encl_page = kzalloc(sizeof(*encl_page), GFP_KERNEL);
	if (!encl_page)
		return ERR_PTR(-ENOMEM);

	encl_page->desc = encl->base + offset;
	encl_page->encl = encl;

	prot = _calc_vm_trans(secinfo_flags, SGX_SECINFO_R, PROT_READ)  |
	       _calc_vm_trans(secinfo_flags, SGX_SECINFO_W, PROT_WRITE) |
	       _calc_vm_trans(secinfo_flags, SGX_SECINFO_X, PROT_EXEC);

	/*
	 * TCS pages must always have RW set for CPU access while the SECINFO
	 * permissions are *always* zero - the CPU ignores the user provided
	 * values and silently overwrites them with zero permissions.
	 */
	if ((secinfo_flags & SGX_SECINFO_PAGE_TYPE_MASK) == SGX_SECINFO_TCS)
		prot |= PROT_READ | PROT_WRITE;

	/* Calculate maximum of the VM flags for the page. */
	encl_page->vm_max_prot_bits = calc_vm_prot_bits(prot, 0);

	return encl_page;
}

/**
 * sgx_zap_enclave_ptes() - remove PTEs mapping the address from enclave
 * @encl: the enclave
 * @addr: page aligned pointer to single page for which PTEs will be removed
 *
 * Multiple VMAs may have an enclave page mapped. Remove the PTE mapping
 * @addr from each VMA. Ensure that the page fault handler is ready to handle
 * new mappings of @addr before calling this function.
 */
void sgx_zap_enclave_ptes(struct sgx_encl *encl, unsigned long addr)
{
	unsigned long mm_list_version;
	struct sgx_encl_mm *encl_mm;
	struct vm_area_struct *vma;
	int idx, ret;

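	/*
	 * Repeat until the mm_list is stable so that an mm added concurrently
	 * with the zapping does not keep stale mappings of @addr.
	 */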
	do {
		mm_list_version = encl->mm_list_version;

		/* Pairs with smp_wmb() in sgx_encl_mm_add(). */
		smp_rmb();

		idx = srcu_read_lock(&encl->srcu);

		list_for_each_entry_rcu(encl_mm, &encl->mm_list, list) {
			if (!mmget_not_zero(encl_mm->mm))
				continue;

			mmap_read_lock(encl_mm->mm);

			ret = sgx_encl_find(encl_mm->mm, addr, &vma);
			if (!ret && encl == vma->vm_private_data)
				zap_vma_ptes(vma, addr, PAGE_SIZE);

			mmap_read_unlock(encl_mm->mm);

			mmput_async(encl_mm->mm);
		}

		srcu_read_unlock(&encl->srcu, idx);
	} while (unlikely(encl->mm_list_version != mm_list_version));
}

/**
 * sgx_alloc_va_page() - Allocate a Version Array (VA) page
 * @reclaim: Reclaim EPC pages directly if none available. Enclave
 *           mutex should not be held if this is set.
 *
 * Allocate a free EPC page and convert it to a Version Array (VA) page.
 *
 * Return:
 *   a VA page,
 *   -errno otherwise
 */
struct sgx_epc_page *sgx_alloc_va_page(bool reclaim)
{
	struct sgx_epc_page *epc_page;
	int ret;

	epc_page = sgx_alloc_epc_page(NULL, reclaim);
	if (IS_ERR(epc_page))
		return ERR_CAST(epc_page);

	ret = __epa(sgx_get_epc_virt_addr(epc_page));
	if (ret) {
		WARN_ONCE(1, "EPA returned %d (0x%x)", ret, ret);
		sgx_encl_free_epc_page(epc_page);
		return ERR_PTR(-EFAULT);
	}

	return epc_page;
}

/**
 * sgx_alloc_va_slot - allocate a VA slot
 * @va_page:	a &struct sgx_va_page instance
 *
 * Allocates a slot from a &struct sgx_va_page instance.
 *
 * Return: offset of the slot inside the VA page
 */
unsigned int sgx_alloc_va_slot(struct sgx_va_page *va_page)
{
	int slot = find_first_zero_bit(va_page->slots, SGX_VA_SLOT_COUNT);

	if (slot < SGX_VA_SLOT_COUNT)
		set_bit(slot, va_page->slots);

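	/* Each VA slot is 8 bytes, so convert the slot index to a byte offset. */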
	return slot << 3;
}

/**
 * sgx_free_va_slot - free a VA slot
 * @va_page:	a &struct sgx_va_page instance
 * @offset:	offset of the slot inside the VA page
 *
 * Frees a slot from a &struct sgx_va_page instance.
 */
void sgx_free_va_slot(struct sgx_va_page *va_page, unsigned int offset)
{
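	/* Convert the byte offset back to a slot index. */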
	clear_bit(offset >> 3, va_page->slots);
}

/**
 * sgx_va_page_full - is the VA page full?
 * @va_page:	a &struct sgx_va_page instance
 *
 * Return: true if all slots have been taken
 */
bool sgx_va_page_full(struct sgx_va_page *va_page)
{
	int slot = find_first_zero_bit(va_page->slots, SGX_VA_SLOT_COUNT);

	return slot == SGX_VA_SLOT_COUNT;
}

/**
 * sgx_encl_free_epc_page - free an EPC page assigned to an enclave
 * @page:	EPC page to be freed
 *
 * Free an EPC page assigned to an enclave. It does EREMOVE for the page, and
 * only upon success, it puts the page back on the free page list. Otherwise,
 * it gives a WARNING to indicate the page is leaked.
 */
void sgx_encl_free_epc_page(struct sgx_epc_page *page)
{
	int ret;

	WARN_ON_ONCE(page->flags & SGX_EPC_PAGE_RECLAIMER_TRACKED);

	ret = __eremove(sgx_get_epc_virt_addr(page));
	if (WARN_ONCE(ret, EREMOVE_ERROR_MESSAGE, ret, ret))
		return;

	sgx_free_epc_page(page);
}