// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021-2022 Intel Corporation */

#undef pr_fmt
#define pr_fmt(fmt)	"tdx: " fmt

#include <linux/cpufeature.h>
#include <asm/coco.h>
#include <asm/tdx.h>
#include <asm/vmx.h>
#include <asm/insn.h>
#include <asm/insn-eval.h>
#include <asm/pgtable.h>

/* TDX module Call Leaf IDs */
#define TDX_GET_INFO			1
#define TDX_GET_VEINFO			3
#define TDX_ACCEPT_PAGE			6
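/*
 * These leaf numbers correspond to the TDG.VP.INFO, TDG.VP.VEINFO.GET
 * and TDG.MEM.PAGE.ACCEPT TDCALLs in the TDX module ABI.
 */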

/* TDX hypercall Leaf IDs */
#define TDVMCALL_MAP_GPA		0x10001

/* MMIO direction */
#define EPT_READ	0
#define EPT_WRITE	1

/* Port I/O direction */
#define PORT_READ	0
#define PORT_WRITE	1

/* See Exit Qualification for I/O Instructions in VMX documentation */
#define VE_IS_IO_IN(e)		((e) & BIT(3))
#define VE_GET_IO_SIZE(e)	(((e) & GENMASK(2, 0)) + 1)
#define VE_GET_PORT_NUM(e)	((e) >> 16)
#define VE_IS_IO_STRING(e)	((e) & BIT(4))
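/*
 * For example (illustrative values): a one-byte IN from port 0x3f8 yields
 * an exit qualification of 0x03f80008 - size bits 2:0 are 0 (1 byte),
 * bit 3 is set (IN) and bits 31:16 hold the port number.
 */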

/*
 * Wrapper for standard use of __tdx_hypercall with no output aside from
 * return code.
 */
static inline u64 _tdx_hypercall(u64 fn, u64 r12, u64 r13, u64 r14, u64 r15)
{
	struct tdx_hypercall_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = fn,
		.r12 = r12,
		.r13 = r13,
		.r14 = r14,
		.r15 = r15,
	};

	return __tdx_hypercall(&args, 0);
}
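
/*
 * Example usage (illustrative; mirrors tdx_enc_status_changed() below):
 *
 *	_tdx_hypercall(TDVMCALL_MAP_GPA, start, end - start, 0, 0);
 */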

/* Called from __tdx_hypercall() for unrecoverable failure */
void __tdx_hypercall_failed(void)
{
	panic("TDVMCALL failed. TDX module bug?");
}

/*
 * The TDG.VP.VMCALL-Instruction-execution sub-functions are defined
 * independently from but are currently matched 1:1 with VMX EXIT_REASONs.
 * Reusing the KVM EXIT_REASON macros makes it easier to connect the host and
 * guest sides of these calls.
 */
static u64 hcall_func(u64 exit_reason)
{
	return exit_reason;
}

#ifdef CONFIG_KVM_GUEST
long tdx_kvm_hypercall(unsigned int nr, unsigned long p1, unsigned long p2,
		       unsigned long p3, unsigned long p4)
{
	struct tdx_hypercall_args args = {
		.r10 = nr,
		.r11 = p1,
		.r12 = p2,
		.r13 = p3,
		.r14 = p4,
	};

	return __tdx_hypercall(&args, 0);
}
EXPORT_SYMBOL_GPL(tdx_kvm_hypercall);
#endif

/*
 * Used for TDX guests to make calls directly to the TD module. This
 * should only be used for calls that have no legitimate reason to fail
 * or where the kernel cannot survive the call failing.
 */
static inline void tdx_module_call(u64 fn, u64 rcx, u64 rdx, u64 r8, u64 r9,
				   struct tdx_module_output *out)
{
	if (__tdx_module_call(fn, rcx, rdx, r8, r9, out))
		panic("TDCALL %lld failed (Buggy TDX module!)\n", fn);
}

static u64 get_cc_mask(void)
{
	struct tdx_module_output out;
	unsigned int gpa_width;

	/*
	 * TDINFO TDX module call is used to get the TD execution environment
	 * information like GPA width, number of available vcpus, debug mode
	 * information, etc. More details about the ABI can be found in TDX
	 * Guest-Host-Communication Interface (GHCI), section 2.4.2 TDCALL
	 * [TDG.VP.INFO].
	 *
	 * The GPA width that comes out of this call is critical. TDX guests
	 * cannot meaningfully run without it.
	 */
	tdx_module_call(TDX_GET_INFO, 0, 0, 0, 0, &out);

	gpa_width = out.rcx & GENMASK(5, 0);

	/*
	 * The highest bit of a guest physical address is the "sharing" bit.
	 * Set it for shared pages and clear it for private pages.
	 */
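	/* E.g. a (VMM-configured) GPA width of 52 yields a mask of BIT_ULL(51). */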
	return BIT_ULL(gpa_width - 1);
}

/*
 * The TDX module spec states that #VE may be injected for a limited set of
 * reasons:
 *
 *  - Emulation of the architectural #VE injection on EPT violation;
 *
 *  - As a result of guest TD execution of a disallowed instruction,
 *    a disallowed MSR access, or CPUID virtualization;
 *
 *  - A notification to the guest TD about anomalous behavior;
 *
 * The last one is opt-in and is not used by the kernel.
 *
 * The Intel Software Developer's Manual describes cases when the instruction
 * length field can be used in section "Information for VM Exits Due to
 * Instruction Execution".
 *
 * For TDX, it ultimately means GET_VEINFO provides reliable instruction length
 * information if #VE occurred due to instruction execution, but not for EPT
 * violations.
 */
static int ve_instr_len(struct ve_info *ve)
{
	switch (ve->exit_reason) {
	case EXIT_REASON_HLT:
	case EXIT_REASON_MSR_READ:
	case EXIT_REASON_MSR_WRITE:
	case EXIT_REASON_CPUID:
	case EXIT_REASON_IO_INSTRUCTION:
		/* It is safe to use ve->instr_len for #VE due to instructions */
		return ve->instr_len;
	case EXIT_REASON_EPT_VIOLATION:
		/*
		 * For EPT violations, ve->instr_len is not defined. For those,
		 * the kernel must decode instructions manually and should not
		 * be using this function.
		 */
		WARN_ONCE(1, "ve->instr_len is not defined for EPT violations");
		return 0;
	default:
		WARN_ONCE(1, "Unexpected #VE-type: %lld\n", ve->exit_reason);
		return ve->instr_len;
	}
}

static u64 __cpuidle __halt(const bool irq_disabled, const bool do_sti)
{
	struct tdx_hypercall_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = hcall_func(EXIT_REASON_HLT),
		.r12 = irq_disabled,
	};

	/*
	 * Emulate HLT operation via hypercall. More info about ABI
	 * can be found in TDX Guest-Host-Communication Interface
	 * (GHCI), section 3.8 TDG.VP.VMCALL<Instruction.HLT>.
	 *
	 * The VMM uses the "IRQ disabled" param to understand IRQ
	 * enabled status (RFLAGS.IF) of the TD guest and to determine
	 * whether or not it should schedule the halted vCPU if an
	 * IRQ becomes pending. E.g. if IRQs are disabled, the VMM
	 * can keep the vCPU in virtual HLT, even if an IRQ is
	 * pending, without hanging/breaking the guest.
	 */
	return __tdx_hypercall(&args, do_sti ? TDX_HCALL_ISSUE_STI : 0);
}

static int handle_halt(struct ve_info *ve)
{
	/*
	 * Since non-safe halt is mainly used in CPU offlining
	 * and the guest will always stay in the halt state, don't
	 * call the STI instruction (set do_sti as false).
	 */
	const bool irq_disabled = irqs_disabled();
	const bool do_sti = false;

	if (__halt(irq_disabled, do_sti))
		return -EIO;

	return ve_instr_len(ve);
}

void __cpuidle tdx_safe_halt(void)
{
	/*
	 * In the do_sti=true case, __tdx_hypercall() enables
	 * interrupts using the STI instruction before the TDCALL. So
	 * set irq_disabled as false.
	 */
	const bool irq_disabled = false;
	const bool do_sti = true;

	/*
	 * Use WARN_ONCE() to report the failure.
	 */
	if (__halt(irq_disabled, do_sti))
		WARN_ONCE(1, "HLT instruction emulation failed\n");
}

static int read_msr(struct pt_regs *regs, struct ve_info *ve)
{
	struct tdx_hypercall_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = hcall_func(EXIT_REASON_MSR_READ),
		.r12 = regs->cx,
	};

	/*
	 * Emulate the MSR read via hypercall. More info about ABI
	 * can be found in TDX Guest-Host-Communication Interface
	 * (GHCI), section titled "TDG.VP.VMCALL<Instruction.RDMSR>".
	 */
	if (__tdx_hypercall(&args, TDX_HCALL_HAS_OUTPUT))
		return -EIO;

	regs->ax = lower_32_bits(args.r11);
	regs->dx = upper_32_bits(args.r11);
	return ve_instr_len(ve);
}

static int write_msr(struct pt_regs *regs, struct ve_info *ve)
{
	struct tdx_hypercall_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = hcall_func(EXIT_REASON_MSR_WRITE),
		.r12 = regs->cx,
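		/* WRMSR hands the value in EDX:EAX; recombine into one u64 */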
		.r13 = (u64)regs->dx << 32 | regs->ax,
	};

	/*
	 * Emulate the MSR write via hypercall. More info about ABI
	 * can be found in TDX Guest-Host-Communication Interface
	 * (GHCI) section titled "TDG.VP.VMCALL<Instruction.WRMSR>".
	 */
	if (__tdx_hypercall(&args, 0))
		return -EIO;

	return ve_instr_len(ve);
}

static int handle_cpuid(struct pt_regs *regs, struct ve_info *ve)
{
	struct tdx_hypercall_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = hcall_func(EXIT_REASON_CPUID),
		.r12 = regs->ax,
		.r13 = regs->cx,
	};

	/*
	 * Only allow VMM to control range reserved for hypervisor
	 * communication.
	 *
	 * Return all-zeros for any CPUID outside the range. This matches
	 * CPU behaviour for an unsupported leaf.
	 */
	if (regs->ax < 0x40000000 || regs->ax > 0x4FFFFFFF) {
		regs->ax = regs->bx = regs->cx = regs->dx = 0;
		return ve_instr_len(ve);
	}

	/*
	 * Emulate the CPUID instruction via a hypercall. More info about
	 * ABI can be found in TDX Guest-Host-Communication Interface
	 * (GHCI), section titled "VP.VMCALL<Instruction.CPUID>".
	 */
	if (__tdx_hypercall(&args, TDX_HCALL_HAS_OUTPUT))
		return -EIO;

	/*
	 * As per TDX GHCI CPUID ABI, r12-r15 registers contain contents of
	 * EAX, EBX, ECX, EDX registers after the CPUID instruction execution.
	 * So copy the register contents back to pt_regs.
	 */
	regs->ax = args.r12;
	regs->bx = args.r13;
	regs->cx = args.r14;
	regs->dx = args.r15;

	return ve_instr_len(ve);
}

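/*
 * Emulate an MMIO read via the MMIO hypercall described in the GHCI
 * section titled "TDG.VP.VMCALL<#VE.RequestMMIO>". The value read is
 * returned by the VMM in r11.
 */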
static bool mmio_read(int size, unsigned long addr, unsigned long *val)
{
	struct tdx_hypercall_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = hcall_func(EXIT_REASON_EPT_VIOLATION),
		.r12 = size,
		.r13 = EPT_READ,
		.r14 = addr,
		.r15 = *val,
	};

	if (__tdx_hypercall(&args, TDX_HCALL_HAS_OUTPUT))
		return false;
	*val = args.r11;
	return true;
}

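/* Emulate an MMIO write via the same #VE.RequestMMIO hypercall */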
static bool mmio_write(int size, unsigned long addr, unsigned long val)
{
	return !_tdx_hypercall(hcall_func(EXIT_REASON_EPT_VIOLATION), size,
			       EPT_WRITE, addr, val);
}

static int handle_mmio(struct pt_regs *regs, struct ve_info *ve)
{
	unsigned long *reg, val, vaddr;
	char buffer[MAX_INSN_SIZE];
	struct insn insn = {};
	enum mmio_type mmio;
	int size, extend_size;
	u8 extend_val = 0;

	/* Only in-kernel MMIO is supported */
	if (WARN_ON_ONCE(user_mode(regs)))
		return -EFAULT;

	if (copy_from_kernel_nofault(buffer, (void *)regs->ip, MAX_INSN_SIZE))
		return -EFAULT;

	if (insn_decode(&insn, buffer, MAX_INSN_SIZE, INSN_MODE_64))
		return -EINVAL;

	mmio = insn_decode_mmio(&insn, &size);
	if (WARN_ON_ONCE(mmio == MMIO_DECODE_FAILED))
		return -EINVAL;

	if (mmio != MMIO_WRITE_IMM && mmio != MMIO_MOVS) {
		reg = insn_get_modrm_reg_ptr(&insn, regs);
		if (!reg)
			return -EINVAL;
	}

	/*
	 * Reject EPT violation #VEs that split pages.
	 *
	 * MMIO accesses are supposed to be naturally aligned and therefore
	 * never cross page boundaries. Seeing split page accesses indicates
	 * a bug or a load_unaligned_zeropad() that stepped into an MMIO page.
	 *
	 * load_unaligned_zeropad() will recover using exception fixups.
	 */
	vaddr = (unsigned long)insn_get_addr_ref(&insn, regs);
	if (vaddr / PAGE_SIZE != (vaddr + size - 1) / PAGE_SIZE)
		return -EFAULT;

	/* Handle writes first */
	switch (mmio) {
	case MMIO_WRITE:
		memcpy(&val, reg, size);
		if (!mmio_write(size, ve->gpa, val))
			return -EIO;
		return insn.length;
	case MMIO_WRITE_IMM:
		val = insn.immediate.value;
		if (!mmio_write(size, ve->gpa, val))
			return -EIO;
		return insn.length;
	case MMIO_READ:
	case MMIO_READ_ZERO_EXTEND:
	case MMIO_READ_SIGN_EXTEND:
		/* Reads are handled below */
		break;
	case MMIO_MOVS:
	case MMIO_DECODE_FAILED:
		/*
		 * MMIO was accessed with an instruction that could not be
		 * decoded or handled properly. It was likely not using io.h
		 * helpers or accessed MMIO accidentally.
		 */
		return -EINVAL;
	default:
		WARN_ONCE(1, "Unknown insn_decode_mmio() decode value?");
		return -EINVAL;
	}

	/* Handle reads */
	if (!mmio_read(size, ve->gpa, &val))
		return -EIO;

	switch (mmio) {
	case MMIO_READ:
		/* Zero-extend for 32-bit operation */
		extend_size = size == 4 ? sizeof(*reg) : 0;
		break;
	case MMIO_READ_ZERO_EXTEND:
		/* Zero extend based on operand size */
		extend_size = insn.opnd_bytes;
		break;
	case MMIO_READ_SIGN_EXTEND:
		/* Sign extend based on operand size */
		extend_size = insn.opnd_bytes;
		if (size == 1 && val & BIT(7))
			extend_val = 0xFF;
		else if (size > 1 && val & BIT(15))
			extend_val = 0xFF;
		break;
	default:
		/* All other cases have to be covered by the first switch() */
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	if (extend_size)
		memset(reg, extend_val, extend_size);
	memcpy(reg, &val, size);
	return insn.length;
}

static bool handle_in(struct pt_regs *regs, int size, int port)
{
	struct tdx_hypercall_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = hcall_func(EXIT_REASON_IO_INSTRUCTION),
		.r12 = size,
		.r13 = PORT_READ,
		.r14 = port,
	};
	/* Mask covering exactly the 'size' bytes the instruction accesses */
	u64 mask = GENMASK(BITS_PER_BYTE * size - 1, 0);
	bool success;

	/*
	 * Emulate the I/O read via hypercall. More info about ABI can be found
	 * in TDX Guest-Host-Communication Interface (GHCI) section titled
	 * "TDG.VP.VMCALL<Instruction.IO>".
	 */
	success = !__tdx_hypercall(&args, TDX_HCALL_HAS_OUTPUT);

	/* Update part of the register affected by the emulated instruction */
	regs->ax &= ~mask;
	if (success)
		regs->ax |= args.r11 & mask;

	return success;
}

static bool handle_out(struct pt_regs *regs, int size, int port)
{
	u64 mask = GENMASK(BITS_PER_BYTE * size - 1, 0);

	/*
	 * Emulate the I/O write via hypercall. More info about ABI can be found
	 * in TDX Guest-Host-Communication Interface (GHCI) section titled
	 * "TDG.VP.VMCALL<Instruction.IO>".
	 */
	return !_tdx_hypercall(hcall_func(EXIT_REASON_IO_INSTRUCTION), size,
			       PORT_WRITE, port, regs->ax & mask);
}

/*
 * Emulate I/O using hypercall.
 *
 * Assumes the IO instruction was using ax, which is enforced
 * by the standard io.h macros.
 *
 * Return the instruction length on success or -errno on failure.
 */
static int handle_io(struct pt_regs *regs, struct ve_info *ve)
{
	u32 exit_qual = ve->exit_qual;
	int size, port;
	bool in, ret;

	if (VE_IS_IO_STRING(exit_qual))
		return -EIO;

	in   = VE_IS_IO_IN(exit_qual);
	size = VE_GET_IO_SIZE(exit_qual);
	port = VE_GET_PORT_NUM(exit_qual);

	if (in)
		ret = handle_in(regs, size, port);
	else
		ret = handle_out(regs, size, port);
	if (!ret)
		return -EIO;

	return ve_instr_len(ve);
}

/*
 * Early #VE exception handler. Only handles a subset of port I/O.
 * Intended only for earlyprintk. Returns false on failure.
 */
__init bool tdx_early_handle_ve(struct pt_regs *regs)
{
	struct ve_info ve;
	int insn_len;

	tdx_get_ve_info(&ve);

	if (ve.exit_reason != EXIT_REASON_IO_INSTRUCTION)
		return false;

	insn_len = handle_io(regs, &ve);
	if (insn_len < 0)
		return false;

	regs->ip += insn_len;
	return true;
}

void tdx_get_ve_info(struct ve_info *ve)
{
	struct tdx_module_output out;

	/*
	 * Called during #VE handling to retrieve the #VE info from the
	 * TDX module.
	 *
	 * This has to be called early in #VE handling. A "nested" #VE which
	 * occurs before this will raise a #DF and is not recoverable.
	 *
	 * The call retrieves the #VE info from the TDX module, which also
	 * clears the "#VE valid" flag. This must be done before anything else
	 * because any #VE that occurs while the valid flag is set will lead to
	 * #DF.
	 *
	 * Note, the TDX module treats virtual NMIs as inhibited if the #VE
	 * valid flag is set. It means that NMI=>#VE will not result in a #DF.
	 */
	tdx_module_call(TDX_GET_VEINFO, 0, 0, 0, 0, &out);

	/* Transfer the output parameters */
	ve->exit_reason = out.rcx;
	ve->exit_qual   = out.rdx;
	ve->gla         = out.r8;
	ve->gpa         = out.r9;
	ve->instr_len   = lower_32_bits(out.r10);
	ve->instr_info  = upper_32_bits(out.r10);
}

/*
 * Handle the user-initiated #VE.
 *
 * On success, returns the number of bytes RIP should be incremented (>=0)
 * or -errno on error.
 */
static int virt_exception_user(struct pt_regs *regs, struct ve_info *ve)
{
	switch (ve->exit_reason) {
	case EXIT_REASON_CPUID:
		return handle_cpuid(regs, ve);
	default:
		pr_warn("Unexpected #VE: %lld\n", ve->exit_reason);
		return -EIO;
	}
}

/*
 * Handle the kernel #VE.
 *
 * On success, returns the number of bytes RIP should be incremented (>=0)
 * or -errno on error.
 */
static int virt_exception_kernel(struct pt_regs *regs, struct ve_info *ve)
{
	switch (ve->exit_reason) {
	case EXIT_REASON_HLT:
		return handle_halt(ve);
	case EXIT_REASON_MSR_READ:
		return read_msr(regs, ve);
	case EXIT_REASON_MSR_WRITE:
		return write_msr(regs, ve);
	case EXIT_REASON_CPUID:
		return handle_cpuid(regs, ve);
	case EXIT_REASON_EPT_VIOLATION:
		return handle_mmio(regs, ve);
	case EXIT_REASON_IO_INSTRUCTION:
		return handle_io(regs, ve);
	default:
		pr_warn("Unexpected #VE: %lld\n", ve->exit_reason);
		return -EIO;
	}
}

bool tdx_handle_virt_exception(struct pt_regs *regs, struct ve_info *ve)
{
	int insn_len;

	if (user_mode(regs))
		insn_len = virt_exception_user(regs, ve);
	else
		insn_len = virt_exception_kernel(regs, ve);
	if (insn_len < 0)
		return false;

	/* After successful #VE handling, move the IP */
	regs->ip += insn_len;

	return true;
}

static bool tdx_tlb_flush_required(bool private)
{
	/*
	 * TDX guest is responsible for flushing TLB on private->shared
	 * transition. VMM is responsible for flushing on shared->private.
	 *
	 * The VMM _can't_ flush private addresses as it can't generate PAs
	 * with the guest's HKID. Shared memory isn't subject to integrity
	 * checking, i.e. the VMM doesn't need to flush for its own protection.
	 *
	 * There's no need to flush when converting from shared to private,
	 * as flushing is the VMM's responsibility in this case, e.g. it must
	 * flush to avoid integrity failures in the face of a buggy or
	 * malicious guest.
	 */
	return !private;
}

static bool tdx_cache_flush_required(void)
{
	/*
	 * AMD SME/SEV can avoid cache flushing if HW enforces cache coherence.
	 * TDX doesn't have such capability.
	 *
	 * Flush cache unconditionally.
	 */
	return true;
}

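/*
 * Try to accept one naturally aligned chunk of pending private memory at
 * the given page-table level. On success, advance *start past the
 * accepted region.
 */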
static bool try_accept_one(phys_addr_t *start, unsigned long len,
			   enum pg_level pg_level)
{
	unsigned long accept_size = page_level_size(pg_level);
	u64 tdcall_rcx;
	u8 page_size;

	if (!IS_ALIGNED(*start, accept_size))
		return false;

	if (len < accept_size)
		return false;

	/*
	 * Pass the page physical address to the TDX module to accept the
	 * pending, private page.
	 *
	 * Bits 2:0 of RCX encode page size: 0 - 4K, 1 - 2M, 2 - 1G.
	 */
	switch (pg_level) {
	case PG_LEVEL_4K:
		page_size = 0;
		break;
	case PG_LEVEL_2M:
		page_size = 1;
		break;
	case PG_LEVEL_1G:
		page_size = 2;
		break;
	default:
		return false;
	}

	tdcall_rcx = *start | page_size;
	if (__tdx_module_call(TDX_ACCEPT_PAGE, tdcall_rcx, 0, 0, 0, NULL))
		return false;

	*start += accept_size;
	return true;
}

/*
 * Inform the VMM of the guest's intent for this physical page: shared with
 * the VMM or private to the guest. The VMM is expected to change its mapping
 * of the page in response.
 */
static bool tdx_enc_status_changed(unsigned long vaddr, int numpages, bool enc)
{
	phys_addr_t start = __pa(vaddr);
	phys_addr_t end   = __pa(vaddr + numpages * PAGE_SIZE);

	if (!enc) {
		/* Set the shared (decrypted) bits: */
		start |= cc_mkdec(0);
		end   |= cc_mkdec(0);
	}

	/*
	 * Notify the VMM about page mapping conversion. More info about ABI
	 * can be found in TDX Guest-Host-Communication Interface (GHCI),
	 * section "TDG.VP.VMCALL<MapGPA>".
	 */
	if (_tdx_hypercall(TDVMCALL_MAP_GPA, start, end - start, 0, 0))
		return false;

	/* private->shared conversion requires only the MapGPA call */
	if (!enc)
		return true;

	/*
	 * For shared->private conversion, accept the page using
	 * TDX_ACCEPT_PAGE TDX module call.
	 */
	while (start < end) {
		unsigned long len = end - start;

		/*
		 * Try larger accepts first. It gives the VMM a chance to keep
		 * 1G/2M SEPT entries where possible and speeds up the process
		 * by cutting the number of hypercalls (if successful).
		 */

		if (try_accept_one(&start, len, PG_LEVEL_1G))
			continue;

		if (try_accept_one(&start, len, PG_LEVEL_2M))
			continue;

		if (!try_accept_one(&start, len, PG_LEVEL_4K))
			return false;
	}

	return true;
}

void __init tdx_early_init(void)
{
	u64 cc_mask;
	u32 eax, sig[3];

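	/*
	 * The TDX_IDENT signature is returned in EBX/EDX/ECX order, hence
	 * the swizzled sig[] indices below.
	 */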
	cpuid_count(TDX_CPUID_LEAF_ID, 0, &eax, &sig[0], &sig[2], &sig[1]);

	if (memcmp(TDX_IDENT, sig, sizeof(sig)))
		return;

	setup_force_cpu_cap(X86_FEATURE_TDX_GUEST);

	cc_set_vendor(CC_VENDOR_INTEL);
	cc_mask = get_cc_mask();
	cc_set_mask(cc_mask);

	/*
	 * All bits above GPA width are reserved and the kernel treats the
	 * shared bit as a flag, not as part of the physical address.
	 *
	 * Adjust physical mask to only cover valid GPA bits.
	 */
	physical_mask &= cc_mask - 1;

	x86_platform.guest.enc_cache_flush_required = tdx_cache_flush_required;
	x86_platform.guest.enc_tlb_flush_required   = tdx_tlb_flush_required;
	x86_platform.guest.enc_status_change_finish = tdx_enc_status_changed;

	pr_info("Guest detected\n");
}