// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2016-20 Intel Corporation. */

#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/highmem.h>
#include <linux/kthread.h>
#include <linux/miscdevice.h>
#include <linux/node.h>
#include <linux/pagemap.h>
#include <linux/ratelimit.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <asm/sgx.h>
#include "driver.h"
#include "encl.h"
#include "encls.h"

struct sgx_epc_section sgx_epc_sections[SGX_MAX_EPC_SECTIONS];
static int sgx_nr_epc_sections;
static struct task_struct *ksgxd_tsk;
static DECLARE_WAIT_QUEUE_HEAD(ksgxd_waitq);
static DEFINE_XARRAY(sgx_epc_address_space);

/*
 * These variables are part of the state of the reclaimer, and must be accessed
 * with sgx_reclaimer_lock acquired.
 */
static LIST_HEAD(sgx_active_page_list);
static DEFINE_SPINLOCK(sgx_reclaimer_lock);

static atomic_long_t sgx_nr_free_pages = ATOMIC_LONG_INIT(0);

/* Nodes with one or more EPC sections. */
static nodemask_t sgx_numa_mask;

/*
 * Array with one list_head for each possible NUMA node. Each
 * list contains all the sgx_epc_section's which are on that
 * node.
 */
static struct sgx_numa_node *sgx_numa_nodes;

static LIST_HEAD(sgx_dirty_page_list);

/*
 * Reset post-kexec EPC pages to the uninitialized state. The pages are removed
 * from the input list, and made available for the page allocator. SECS pages
 * that still precede their child pages in the input list are left intact,
 * because EREMOVE fails for a SECS page until all of its children are gone.
 */
static void __sgx_sanitize_pages(struct list_head *dirty_page_list)
{
	struct sgx_epc_page *page;
	LIST_HEAD(dirty);
	int ret;

	/* dirty_page_list is thread-local, no need for a lock: */
	while (!list_empty(dirty_page_list)) {
		if (kthread_should_stop())
			return;

		page = list_first_entry(dirty_page_list, struct sgx_epc_page, list);

		/*
		 * Checking page->poison without holding the node->lock
		 * is racy, but losing the race (i.e. poison is set just
		 * after the check) just means __eremove() will be uselessly
		 * called for a page that sgx_free_epc_page() will put onto
		 * the node->sgx_poison_page_list later.
		 */
		if (page->poison) {
			struct sgx_epc_section *section = &sgx_epc_sections[page->section];
			struct sgx_numa_node *node = section->node;

			spin_lock(&node->lock);
			list_move(&page->list, &node->sgx_poison_page_list);
			spin_unlock(&node->lock);

			continue;
		}

		ret = __eremove(sgx_get_epc_virt_addr(page));
		if (!ret) {
			/*
			 * page is now sanitized. Make it available via the SGX
			 * page allocator:
			 */
			list_del(&page->list);
			sgx_free_epc_page(page);
		} else {
			/* The page is not yet clean - move to the dirty list. */
			list_move_tail(&page->list, &dirty);
		}

		cond_resched();
	}

	list_splice(&dirty, dirty_page_list);
}

static bool sgx_reclaimer_age(struct sgx_epc_page *epc_page)
{
	struct sgx_encl_page *page = epc_page->owner;
	struct sgx_encl *encl = page->encl;
	struct sgx_encl_mm *encl_mm;
	bool ret = true;
	int idx;

	idx = srcu_read_lock(&encl->srcu);

	list_for_each_entry_rcu(encl_mm, &encl->mm_list, list) {
		if (!mmget_not_zero(encl_mm->mm))
			continue;

		mmap_read_lock(encl_mm->mm);
		ret = !sgx_encl_test_and_clear_young(encl_mm->mm, page);
		mmap_read_unlock(encl_mm->mm);

		mmput_async(encl_mm->mm);

		if (!ret)
			break;
	}

	srcu_read_unlock(&encl->srcu, idx);

	if (!ret)
		return false;

	return true;
}

static void sgx_reclaimer_block(struct sgx_epc_page *epc_page)
{
	struct sgx_encl_page *page = epc_page->owner;
	unsigned long addr = page->desc & PAGE_MASK;
	struct sgx_encl *encl = page->encl;
	unsigned long mm_list_version;
	struct sgx_encl_mm *encl_mm;
	struct vm_area_struct *vma;
	int idx, ret;

	do {
		mm_list_version = encl->mm_list_version;

		/* Pairs with smp_rmb() in sgx_encl_mm_add(). */
		smp_rmb();

		idx = srcu_read_lock(&encl->srcu);

		list_for_each_entry_rcu(encl_mm, &encl->mm_list, list) {
			if (!mmget_not_zero(encl_mm->mm))
				continue;

			mmap_read_lock(encl_mm->mm);

			ret = sgx_encl_find(encl_mm->mm, addr, &vma);
			if (!ret && encl == vma->vm_private_data)
				zap_vma_ptes(vma, addr, PAGE_SIZE);

			mmap_read_unlock(encl_mm->mm);

			mmput_async(encl_mm->mm);
		}

		srcu_read_unlock(&encl->srcu, idx);
	} while (unlikely(encl->mm_list_version != mm_list_version));

	mutex_lock(&encl->lock);

	ret = __eblock(sgx_get_epc_virt_addr(epc_page));
	if (encls_failed(ret))
		ENCLS_WARN(ret, "EBLOCK");

	mutex_unlock(&encl->lock);
}

static int __sgx_encl_ewb(struct sgx_epc_page *epc_page, void *va_slot,
			  struct sgx_backing *backing)
{
	struct sgx_pageinfo pginfo;
	int ret;

	pginfo.addr = 0;
	pginfo.secs = 0;

	pginfo.contents = (unsigned long)kmap_atomic(backing->contents);
	pginfo.metadata = (unsigned long)kmap_atomic(backing->pcmd) +
			  backing->pcmd_offset;

	ret = __ewb(&pginfo, sgx_get_epc_virt_addr(epc_page), va_slot);
	set_page_dirty(backing->pcmd);
	set_page_dirty(backing->contents);

	kunmap_atomic((void *)(unsigned long)(pginfo.metadata -
					      backing->pcmd_offset));
	kunmap_atomic((void *)(unsigned long)pginfo.contents);

	return ret;
}

static void sgx_ipi_cb(void *info)
{
}

static const cpumask_t *sgx_encl_ewb_cpumask(struct sgx_encl *encl)
{
	cpumask_t *cpumask = &encl->cpumask;
	struct sgx_encl_mm *encl_mm;
	int idx;

	/*
	 * Can race with sgx_encl_mm_add(), but ETRACK has already been
	 * executed, which means that the CPUs running in the new mm will enter
	 * into the enclave with a fresh epoch.
	 */
	cpumask_clear(cpumask);

	idx = srcu_read_lock(&encl->srcu);

	list_for_each_entry_rcu(encl_mm, &encl->mm_list, list) {
		if (!mmget_not_zero(encl_mm->mm))
			continue;

		cpumask_or(cpumask, cpumask, mm_cpumask(encl_mm->mm));

		mmput_async(encl_mm->mm);
	}

	srcu_read_unlock(&encl->srcu, idx);

	return cpumask;
}

/*
 * Swap a page out to regular memory after it has been transformed to the
 * blocked state with EBLOCK, which means that it can no longer be referenced
 * (no new TLB entries).
 *
 * The first attempt just tries to write the page, assuming that some other
 * thread has reset the count of threads inside the enclave by using ETRACK
 * and the previous thread count has been zeroed out. The second attempt calls
 * ETRACK before EWB. If that fails, we kick all the HW threads out and then
 * do EWB, which should be guaranteed to succeed.
 */
static void sgx_encl_ewb(struct sgx_epc_page *epc_page,
			 struct sgx_backing *backing)
{
	struct sgx_encl_page *encl_page = epc_page->owner;
	struct sgx_encl *encl = encl_page->encl;
	struct sgx_va_page *va_page;
	unsigned int va_offset;
	void *va_slot;
	int ret;

	encl_page->desc &= ~SGX_ENCL_PAGE_BEING_RECLAIMED;

	va_page = list_first_entry(&encl->va_pages, struct sgx_va_page,
				   list);
	va_offset = sgx_alloc_va_slot(va_page);
	va_slot = sgx_get_epc_virt_addr(va_page->epc_page) + va_offset;
	if (sgx_va_page_full(va_page))
		list_move_tail(&va_page->list, &encl->va_pages);

	ret = __sgx_encl_ewb(epc_page, va_slot, backing);
	if (ret == SGX_NOT_TRACKED) {
		ret = __etrack(sgx_get_epc_virt_addr(encl->secs.epc_page));
		if (ret) {
			if (encls_failed(ret))
				ENCLS_WARN(ret, "ETRACK");
		}

		ret = __sgx_encl_ewb(epc_page, va_slot, backing);
		if (ret == SGX_NOT_TRACKED) {
			/*
			 * Slow path, send IPIs to kick cpus out of the
			 * enclave.  Note, it's imperative that the cpu
			 * mask is generated *after* ETRACK, else we'll
			 * miss cpus that entered the enclave between
			 * generating the mask and incrementing epoch.
			 */
			on_each_cpu_mask(sgx_encl_ewb_cpumask(encl),
					 sgx_ipi_cb, NULL, 1);
			ret = __sgx_encl_ewb(epc_page, va_slot, backing);
		}
	}

	if (ret) {
		if (encls_failed(ret))
			ENCLS_WARN(ret, "EWB");

		sgx_free_va_slot(va_page, va_offset);
	} else {
		encl_page->desc |= va_offset;
		encl_page->va_page = va_page;
	}
}

static void sgx_reclaimer_write(struct sgx_epc_page *epc_page,
				struct sgx_backing *backing)
{
	struct sgx_encl_page *encl_page = epc_page->owner;
	struct sgx_encl *encl = encl_page->encl;
	struct sgx_backing secs_backing;
	int ret;

	mutex_lock(&encl->lock);

	sgx_encl_ewb(epc_page, backing);
	encl_page->epc_page = NULL;
	encl->secs_child_cnt--;
	sgx_encl_put_backing(backing);

	if (!encl->secs_child_cnt && test_bit(SGX_ENCL_INITIALIZED, &encl->flags)) {
		ret = sgx_encl_alloc_backing(encl, PFN_DOWN(encl->size),
					     &secs_backing);
		if (ret)
			goto out;

		sgx_encl_ewb(encl->secs.epc_page, &secs_backing);

		sgx_encl_free_epc_page(encl->secs.epc_page);
		encl->secs.epc_page = NULL;

		sgx_encl_put_backing(&secs_backing);
	}

out:
	mutex_unlock(&encl->lock);
}

/*
 * Take a fixed number of pages from the head of the active page pool and
 * reclaim them to the enclave's private shmem files. Skip pages that have
 * been accessed since the last scan. Move those pages to the tail of the
 * active page pool so that pages get scanned in an LRU-like fashion.
 *
 * Batch process a chunk of pages (at the moment 16) in order to reduce the
 * number of IPIs and ETRACKs potentially required. sgx_encl_ewb() spreads the
 * cost a bit among the HW threads with its three-stage EWB pipeline (EWB,
 * ETRACK + EWB and IPI + EWB), but not sufficiently. Reclaiming one page at a
 * time would also be problematic as it would increase the lock contention too
 * much, which would halt forward progress.
 */
static void sgx_reclaim_pages(void)
{
	struct sgx_epc_page *chunk[SGX_NR_TO_SCAN];
	struct sgx_backing backing[SGX_NR_TO_SCAN];
	struct sgx_encl_page *encl_page;
	struct sgx_epc_page *epc_page;
	pgoff_t page_index;
	int cnt = 0;
	int ret;
	int i;

	spin_lock(&sgx_reclaimer_lock);
	for (i = 0; i < SGX_NR_TO_SCAN; i++) {
		if (list_empty(&sgx_active_page_list))
			break;

		epc_page = list_first_entry(&sgx_active_page_list,
					    struct sgx_epc_page, list);
		list_del_init(&epc_page->list);
		encl_page = epc_page->owner;

		if (kref_get_unless_zero(&encl_page->encl->refcount) != 0)
			chunk[cnt++] = epc_page;
		else
			/* The owner is freeing the page. No need to add the
			 * page back to the list of reclaimable pages.
			 */
			epc_page->flags &= ~SGX_EPC_PAGE_RECLAIMER_TRACKED;
	}
	spin_unlock(&sgx_reclaimer_lock);

	for (i = 0; i < cnt; i++) {
		epc_page = chunk[i];
		encl_page = epc_page->owner;

		if (!sgx_reclaimer_age(epc_page))
			goto skip;

		page_index = PFN_DOWN(encl_page->desc - encl_page->encl->base);

		mutex_lock(&encl_page->encl->lock);
		ret = sgx_encl_alloc_backing(encl_page->encl, page_index, &backing[i]);
		if (ret) {
			mutex_unlock(&encl_page->encl->lock);
			goto skip;
		}

		encl_page->desc |= SGX_ENCL_PAGE_BEING_RECLAIMED;
		mutex_unlock(&encl_page->encl->lock);
		continue;

skip:
		spin_lock(&sgx_reclaimer_lock);
		list_add_tail(&epc_page->list, &sgx_active_page_list);
		spin_unlock(&sgx_reclaimer_lock);

		kref_put(&encl_page->encl->refcount, sgx_encl_release);

		chunk[i] = NULL;
	}

	for (i = 0; i < cnt; i++) {
		epc_page = chunk[i];
		if (epc_page)
			sgx_reclaimer_block(epc_page);
	}

	for (i = 0; i < cnt; i++) {
		epc_page = chunk[i];
		if (!epc_page)
			continue;

		encl_page = epc_page->owner;
		sgx_reclaimer_write(epc_page, &backing[i]);

		kref_put(&encl_page->encl->refcount, sgx_encl_release);
		epc_page->flags &= ~SGX_EPC_PAGE_RECLAIMER_TRACKED;

		sgx_free_epc_page(epc_page);
	}
}

static bool sgx_should_reclaim(unsigned long watermark)
{
	return atomic_long_read(&sgx_nr_free_pages) < watermark &&
	       !list_empty(&sgx_active_page_list);
}

static int ksgxd(void *p)
{
	set_freezable();

	/*
	 * Sanitize pages in order to recover from kexec(). The 2nd pass is
	 * required for SECS pages, whose child pages blocked EREMOVE.
	 */
	__sgx_sanitize_pages(&sgx_dirty_page_list);
	__sgx_sanitize_pages(&sgx_dirty_page_list);

	/* sanity check: */
	WARN_ON(!list_empty(&sgx_dirty_page_list));

	while (!kthread_should_stop()) {
		if (try_to_freeze())
			continue;

		wait_event_freezable(ksgxd_waitq,
				     kthread_should_stop() ||
				     sgx_should_reclaim(SGX_NR_HIGH_PAGES));

		if (sgx_should_reclaim(SGX_NR_HIGH_PAGES))
			sgx_reclaim_pages();

		cond_resched();
	}

	return 0;
}

static bool __init sgx_page_reclaimer_init(void)
{
	struct task_struct *tsk;

	tsk = kthread_run(ksgxd, NULL, "ksgxd");
	if (IS_ERR(tsk))
		return false;

	ksgxd_tsk = tsk;

	return true;
}

bool current_is_ksgxd(void)
{
	return current == ksgxd_tsk;
}

static struct sgx_epc_page *__sgx_alloc_epc_page_from_node(int nid)
{
	struct sgx_numa_node *node = &sgx_numa_nodes[nid];
	struct sgx_epc_page *page = NULL;

	spin_lock(&node->lock);

	if (list_empty(&node->free_page_list)) {
		spin_unlock(&node->lock);
		return NULL;
	}

	page = list_first_entry(&node->free_page_list, struct sgx_epc_page, list);
	list_del_init(&page->list);
	page->flags = 0;

	spin_unlock(&node->lock);
	atomic_long_dec(&sgx_nr_free_pages);

	return page;
}

/**
 * __sgx_alloc_epc_page() - Allocate an EPC page
 *
 * Iterate through the NUMA nodes and reserve a free EPC page for the caller.
 * Start from the NUMA node where the caller is executing.
 *
 * Return:
 * - an EPC page:	A free EPC page was available.
 * - ERR_PTR(-ENOMEM):	Out of EPC pages.
 */
struct sgx_epc_page *__sgx_alloc_epc_page(void)
{
	struct sgx_epc_page *page;
	int nid_of_current = numa_node_id();
	int nid = nid_of_current;

	if (node_isset(nid_of_current, sgx_numa_mask)) {
		page = __sgx_alloc_epc_page_from_node(nid_of_current);
		if (page)
			return page;
	}

	/* Fall back to the non-local NUMA nodes: */
	while (true) {
		nid = next_node_in(nid, sgx_numa_mask);
		if (nid == nid_of_current)
			break;

		page = __sgx_alloc_epc_page_from_node(nid);
		if (page)
			return page;
	}

	return ERR_PTR(-ENOMEM);
}

/**
 * sgx_mark_page_reclaimable() - Mark a page as reclaimable
 * @page:	EPC page
 *
 * Mark a page as reclaimable and add it to the active page list. Pages
 * are automatically removed from the active list when freed.
 */
void sgx_mark_page_reclaimable(struct sgx_epc_page *page)
{
	spin_lock(&sgx_reclaimer_lock);
	page->flags |= SGX_EPC_PAGE_RECLAIMER_TRACKED;
	list_add_tail(&page->list, &sgx_active_page_list);
	spin_unlock(&sgx_reclaimer_lock);
}

/**
 * sgx_unmark_page_reclaimable() - Remove a page from the reclaim list
 * @page:	EPC page
 *
 * Clear the reclaimable flag and remove the page from the active page list.
 *
 * Return:
 *   0 on success,
 *   -EBUSY if the page is in the process of being reclaimed
 */
int sgx_unmark_page_reclaimable(struct sgx_epc_page *page)
{
	spin_lock(&sgx_reclaimer_lock);
	if (page->flags & SGX_EPC_PAGE_RECLAIMER_TRACKED) {
		/* The page is being reclaimed. */
		if (list_empty(&page->list)) {
			spin_unlock(&sgx_reclaimer_lock);
			return -EBUSY;
		}

		list_del(&page->list);
		page->flags &= ~SGX_EPC_PAGE_RECLAIMER_TRACKED;
	}
	spin_unlock(&sgx_reclaimer_lock);

	return 0;
}

/**
 * sgx_alloc_epc_page() - Allocate an EPC page
 * @owner:	the owner of the EPC page
 * @reclaim:	reclaim pages if necessary
 *
 * Iterate through the EPC sections and hand out a free EPC page to the caller.
 * When a page is no longer needed it must be released with sgx_free_epc_page().
 * If @reclaim is set to true, directly reclaim pages when we are out of pages.
 * No mm's can be locked when @reclaim is set to true.
 *
 * Finally, wake up ksgxd when the number of pages goes below the watermark
 * before returning to the caller.
 *
 * Return:
 *   an EPC page,
 *   -errno on error
 */
struct sgx_epc_page *sgx_alloc_epc_page(void *owner, bool reclaim)
{
	struct sgx_epc_page *page;

	for ( ; ; ) {
		page = __sgx_alloc_epc_page();
		if (!IS_ERR(page)) {
			page->owner = owner;
			break;
		}

		if (list_empty(&sgx_active_page_list))
			return ERR_PTR(-ENOMEM);

		if (!reclaim) {
			page = ERR_PTR(-EBUSY);
			break;
		}

		if (signal_pending(current)) {
			page = ERR_PTR(-ERESTARTSYS);
			break;
		}

		sgx_reclaim_pages();
		cond_resched();
	}

	if (sgx_should_reclaim(SGX_NR_LOW_PAGES))
		wake_up(&ksgxd_waitq);

	return page;
}
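
/*
 * Illustrative sketch, not part of the driver: the typical lifecycle of an
 * EPC page from the point of view of a consumer such as the enclave fault
 * handler. Only the sgx_*_epc_page() and sgx_*_page_reclaimable() calls are
 * real functions from this file; the wrapper itself is hypothetical.
 */
static int __maybe_unused sgx_epc_lifecycle_example(void *owner)
{
	struct sgx_epc_page *epc_page;

	/* May sleep and reclaim since @reclaim is true; no mm locks held. */
	epc_page = sgx_alloc_epc_page(owner, true);
	if (IS_ERR(epc_page))
		return PTR_ERR(epc_page);

	/* Hand the page to ksgxd so it can be swapped out under pressure. */
	sgx_mark_page_reclaimable(epc_page);

	/* ... the page is used as enclave memory ... */

	/*
	 * Before freeing, pull the page off the active list. -EBUSY means
	 * the reclaimer holds the page right now and the caller must back
	 * off (the real driver simply skips such pages).
	 */
	if (sgx_unmark_page_reclaimable(epc_page))
		return -EBUSY;

	/*
	 * A real caller must EREMOVE enclave pages (see
	 * sgx_encl_free_epc_page()) so that the page is back in the
	 * uninitialized state before it is returned to the allocator.
	 */
	sgx_free_epc_page(epc_page);
	return 0;
}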

/**
 * sgx_free_epc_page() - Free an EPC page
 * @page:	an EPC page
 *
 * Put the EPC page back to the list of free pages. It's the caller's
 * responsibility to make sure that the page is in the uninitialized state. In
 * other words, do EREMOVE, EWB or whatever operation is necessary before
 * calling this function.
 */
void sgx_free_epc_page(struct sgx_epc_page *page)
{
	struct sgx_epc_section *section = &sgx_epc_sections[page->section];
	struct sgx_numa_node *node = section->node;

	spin_lock(&node->lock);

	page->owner = NULL;
	if (page->poison)
		list_add(&page->list, &node->sgx_poison_page_list);
	else
		list_add_tail(&page->list, &node->free_page_list);
	page->flags = SGX_EPC_PAGE_IS_FREE;

	spin_unlock(&node->lock);
	atomic_long_inc(&sgx_nr_free_pages);
}

static bool __init sgx_setup_epc_section(u64 phys_addr, u64 size,
					 unsigned long index,
					 struct sgx_epc_section *section)
{
	unsigned long nr_pages = size >> PAGE_SHIFT;
	unsigned long i;

	section->virt_addr = memremap(phys_addr, size, MEMREMAP_WB);
	if (!section->virt_addr)
		return false;

	section->pages = vmalloc(nr_pages * sizeof(struct sgx_epc_page));
	if (!section->pages) {
		memunmap(section->virt_addr);
		return false;
	}

	section->phys_addr = phys_addr;
	xa_store_range(&sgx_epc_address_space, section->phys_addr,
		       phys_addr + size - 1, section, GFP_KERNEL);

	for (i = 0; i < nr_pages; i++) {
		section->pages[i].section = index;
		section->pages[i].flags = 0;
		section->pages[i].owner = NULL;
		section->pages[i].poison = 0;
		list_add_tail(&section->pages[i].list, &sgx_dirty_page_list);
	}

	return true;
}

bool arch_is_platform_page(u64 paddr)
{
	return !!xa_load(&sgx_epc_address_space, paddr);
}
EXPORT_SYMBOL_GPL(arch_is_platform_page);

static struct sgx_epc_page *sgx_paddr_to_page(u64 paddr)
{
	struct sgx_epc_section *section;

	section = xa_load(&sgx_epc_address_space, paddr);
	if (!section)
		return NULL;

	return &section->pages[PFN_DOWN(paddr - section->phys_addr)];
}

/*
 * Called in process context to handle a hardware reported
 * error in an SGX EPC page.
 * If the MF_ACTION_REQUIRED bit is set in flags, then the
 * context is the task that consumed the poison data. Otherwise
 * this is called from a kernel thread unrelated to the page.
 */
int arch_memory_failure(unsigned long pfn, int flags)
{
	struct sgx_epc_page *page = sgx_paddr_to_page(pfn << PAGE_SHIFT);
	struct sgx_epc_section *section;
	struct sgx_numa_node *node;

	/*
	 * mm/memory-failure.c calls this routine for all errors
	 * where there isn't a "struct page" for the address. But that
	 * includes other address ranges besides SGX.
	 */
	if (!page)
		return -ENXIO;

	/*
	 * If poison was consumed synchronously, send a SIGBUS to
	 * the task. Hardware has already exited the SGX enclave and
	 * will not allow re-entry to an enclave that has a memory
	 * error. The signal may help the task understand why the
	 * enclave is broken.
	 */
	if (flags & MF_ACTION_REQUIRED)
		force_sig(SIGBUS);

	section = &sgx_epc_sections[page->section];
	node = section->node;

	spin_lock(&node->lock);

	/* Already poisoned? Nothing more to do */
	if (page->poison)
		goto out;

	page->poison = 1;

	/*
	 * If the page is on a free list, move it to the per-node
	 * poison page list.
	 */
	if (page->flags & SGX_EPC_PAGE_IS_FREE) {
		list_move(&page->list, &node->sgx_poison_page_list);
		goto out;
	}

	/*
	 * TBD: Add additional plumbing to enable pre-emptive
	 * action for asynchronous poison notification. Until
	 * then just hope that the poison:
	 * a) is not accessed - sgx_free_epc_page() will deal with it
	 *    when the user gives it back
	 * b) results in a recoverable machine check rather than
	 *    a fatal one
	 */
out:
	spin_unlock(&node->lock);
	return 0;
}

/**
 * A section metric is concatenated in a way that @low bits 12-31 define the
 * bits 12-31 of the metric and @high bits 0-19 define the bits 32-51 of the
 * metric.
 */
static inline u64 __init sgx_calc_section_metric(u64 low, u64 high)
{
	return (low & GENMASK_ULL(31, 12)) +
	       ((high & GENMASK_ULL(19, 0)) << 32);
}
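
/*
 * Worked example with illustrative CPUID values: if low = 0x80001001 and
 * high = 0x00000002, the low type/flag bits are masked off and the metric is
 *
 *   (0x80001001 & GENMASK_ULL(31, 12)) + ((0x00000002 & GENMASK_ULL(19, 0)) << 32)
 *   = 0x0000000080001000 + 0x0000000200000000
 *   = 0x0000000280001000
 *
 * i.e. a 52-bit physical address or size assembled from the two registers.
 */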

#ifdef CONFIG_NUMA
static ssize_t sgx_total_bytes_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lu\n", sgx_numa_nodes[dev->id].size);
}
static DEVICE_ATTR_RO(sgx_total_bytes);

static umode_t arch_node_attr_is_visible(struct kobject *kobj,
					 struct attribute *attr, int idx)
{
	/* Make all x86/ attributes invisible when SGX is not initialized: */
	if (nodes_empty(sgx_numa_mask))
		return 0;

	return attr->mode;
}

static struct attribute *arch_node_dev_attrs[] = {
	&dev_attr_sgx_total_bytes.attr,
	NULL,
};

const struct attribute_group arch_node_dev_group = {
	.name = "x86",
	.attrs = arch_node_dev_attrs,
	.is_visible = arch_node_attr_is_visible,
};
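
/*
 * With the group above in place, each NUMA node that has EPC attached should
 * expose the attribute under the standard node device directory, e.g.
 * (illustrative path and value for node 0):
 *
 *   $ cat /sys/devices/system/node/node0/x86/sgx_total_bytes
 *   68719476736
 *
 * The number shown is simply that node's accumulated EPC size in bytes.
 */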

static void __init arch_update_sysfs_visibility(int nid)
{
	struct node *node = node_devices[nid];
	int ret;

	ret = sysfs_update_group(&node->dev.kobj, &arch_node_dev_group);

	if (ret)
		pr_err("sysfs update failed (%d), files may be invisible", ret);
}
#else /* !CONFIG_NUMA */
static void __init arch_update_sysfs_visibility(int nid) {}
#endif

static bool __init sgx_page_cache_init(void)
{
	u32 eax, ebx, ecx, edx, type;
	u64 pa, size;
	int nid;
	int i;

	sgx_numa_nodes = kmalloc_array(num_possible_nodes(), sizeof(*sgx_numa_nodes), GFP_KERNEL);
	if (!sgx_numa_nodes)
		return false;

	for (i = 0; i < ARRAY_SIZE(sgx_epc_sections); i++) {
		cpuid_count(SGX_CPUID, i + SGX_CPUID_EPC, &eax, &ebx, &ecx, &edx);

		type = eax & SGX_CPUID_EPC_MASK;
		if (type == SGX_CPUID_EPC_INVALID)
			break;

		if (type != SGX_CPUID_EPC_SECTION) {
			pr_err_once("Unknown EPC section type: %u\n", type);
			break;
		}

		pa = sgx_calc_section_metric(eax, ebx);
		size = sgx_calc_section_metric(ecx, edx);

		pr_info("EPC section 0x%llx-0x%llx\n", pa, pa + size - 1);

		if (!sgx_setup_epc_section(pa, size, i, &sgx_epc_sections[i])) {
			pr_err("No free memory for an EPC section\n");
			break;
		}

		nid = numa_map_to_online_node(phys_to_target_node(pa));
		if (nid == NUMA_NO_NODE) {
			/* The physical address is already printed above. */
			pr_warn(FW_BUG "Unable to map EPC section to online node. Fallback to the NUMA node 0.\n");
			nid = 0;
		}

		if (!node_isset(nid, sgx_numa_mask)) {
			spin_lock_init(&sgx_numa_nodes[nid].lock);
			INIT_LIST_HEAD(&sgx_numa_nodes[nid].free_page_list);
			INIT_LIST_HEAD(&sgx_numa_nodes[nid].sgx_poison_page_list);
			node_set(nid, sgx_numa_mask);
			sgx_numa_nodes[nid].size = 0;

			/* Make SGX-specific node sysfs files visible: */
			arch_update_sysfs_visibility(nid);
		}

		sgx_epc_sections[i].node = &sgx_numa_nodes[nid];
		sgx_numa_nodes[nid].size += size;

		sgx_nr_epc_sections++;
	}

	if (!sgx_nr_epc_sections) {
		pr_err("There are zero EPC sections.\n");
		return false;
	}

	return true;
}

/*
 * Update the SGX_LEPUBKEYHASH MSRs to the values specified by the caller.
 * The bare-metal driver requires updating them to the hash of the enclave's
 * signer before EINIT. KVM needs to update them to the guest's virtual MSR
 * values before doing EINIT from the guest.
 */
void sgx_update_lepubkeyhash(u64 *lepubkeyhash)
{
	int i;

	WARN_ON_ONCE(preemptible());

	for (i = 0; i < 4; i++)
		wrmsrl(MSR_IA32_SGXLEPUBKEYHASH0 + i, lepubkeyhash[i]);
}
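
/*
 * Illustrative sketch of the expected calling convention (the EINIT callers
 * live outside this file, so this is an assumption about usage rather than a
 * copy of them): the MSR writes and the subsequent EINIT must run on the same
 * CPU, hence preemption is disabled around the pair, roughly:
 *
 *	preempt_disable();
 *	sgx_update_lepubkeyhash(lepubkeyhash);
 *	ret = __einit(sigstruct, token, secs);
 *	preempt_enable();
 *
 * The WARN_ON_ONCE(preemptible()) above checks the first half of that
 * contract.
 */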

const struct file_operations sgx_provision_fops = {
	.owner = THIS_MODULE,
};

static struct miscdevice sgx_dev_provision = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "sgx_provision",
	.nodename = "sgx_provision",
	.fops = &sgx_provision_fops,
};

/**
 * sgx_set_attribute() - Update allowed attributes given file descriptor
 * @allowed_attributes:	Pointer to allowed enclave attributes
 * @attribute_fd:	File descriptor for specific attribute
 *
 * Append the enclave attribute indicated by the file descriptor to the allowed
 * attributes. Currently only SGX_ATTR_PROVISIONKEY, indicated by
 * /dev/sgx_provision, is supported.
 *
 * Return:
 *   0:		SGX_ATTR_PROVISIONKEY is appended to allowed_attributes
 *   -EINVAL:	Invalid or unsupported file descriptor
 */
int sgx_set_attribute(unsigned long *allowed_attributes,
		      unsigned int attribute_fd)
{
	struct file *file;

	file = fget(attribute_fd);
	if (!file)
		return -EINVAL;

	if (file->f_op != &sgx_provision_fops) {
		fput(file);
		return -EINVAL;
	}

	*allowed_attributes |= SGX_ATTR_PROVISIONKEY;

	fput(file);
	return 0;
}
EXPORT_SYMBOL_GPL(sgx_set_attribute);
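
/*
 * Illustrative sketch, assuming the caller shape used by the enclave
 * provisioning ioctl (which lives outside this file): userspace opens
 * /dev/sgx_provision, passes the fd in the ioctl argument, and the handler
 * forwards it, roughly:
 *
 *	return sgx_set_attribute(&encl->attributes_mask, params.fd);
 *
 * The f_op check against sgx_provision_fops above is what ties the permission
 * to having access to /dev/sgx_provision.
 */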

static int __init sgx_init(void)
{
	int ret;
	int i;

	if (!cpu_feature_enabled(X86_FEATURE_SGX))
		return -ENODEV;

	if (!sgx_page_cache_init())
		return -ENOMEM;

	if (!sgx_page_reclaimer_init()) {
		ret = -ENOMEM;
		goto err_page_cache;
	}

	ret = misc_register(&sgx_dev_provision);
	if (ret)
		goto err_kthread;

	/*
	 * Always try to initialize the native *and* KVM drivers.
	 * The KVM driver is less picky than the native one and
	 * can function if the native one is not supported on the
	 * current system or fails to initialize.
	 *
	 * Error out only if both fail to initialize.
	 */
	ret = sgx_drv_init();

	if (sgx_vepc_init() && ret)
		goto err_provision;

	return 0;

err_provision:
	misc_deregister(&sgx_dev_provision);

err_kthread:
	kthread_stop(ksgxd_tsk);

err_page_cache:
	for (i = 0; i < sgx_nr_epc_sections; i++) {
		vfree(sgx_epc_sections[i].pages);
		memunmap(sgx_epc_sections[i].virt_addr);
	}

	return ret;
}

device_initcall(sgx_init);