1 /* SPDX-License-Identifier: LGPL-2.1-or-later */
2
3 #include <efi.h>
4 #include <efilib.h>
5 #if defined(__i386__) || defined(__x86_64__)
6 #include <cpuid.h>
7 #endif
8
9 #include "ticks.h"
10
11 #if defined(__i386__) || defined(__x86_64__)
in_hypervisor(void)12 static BOOLEAN in_hypervisor(void) {
13 uint32_t eax, ebx, ecx, edx;
14
15 /* The TSC might or might not be virtualized in VMs (and thus might not be accurate or start at zero
16 * at boot), depending on hypervisor and CPU functionality. If it's not virtualized it's not useful
17 * for keeping time, hence don't attempt to use it.
18 *
19 * This is a dumbed down version of src/basic/virt.c's detect_vm() that safely works in the UEFI
20 * environment. */
21
22 if (__get_cpuid(1, &eax, &ebx, &ecx, &edx) == 0)
23 return FALSE;
24
25 return !!(ecx & 0x80000000U);
26 }
27 #endif
28
29 #ifdef __x86_64__
ticks_read(void)30 static UINT64 ticks_read(void) {
31 UINT64 a, d;
32
33 if (in_hypervisor())
34 return 0;
35
36 __asm__ volatile ("rdtsc" : "=a" (a), "=d" (d));
37 return (d << 32) | a;
38 }
39 #elif defined(__i386__)
ticks_read(void)40 static UINT64 ticks_read(void) {
41 UINT64 val;
42
43 if (in_hypervisor())
44 return 0;
45
46 __asm__ volatile ("rdtsc" : "=A" (val));
47 return val;
48 }
49 #elif defined(__aarch64__)
ticks_read(void)50 static UINT64 ticks_read(void) {
51 UINT64 val;
52 __asm__ volatile ("mrs %0, cntpct_el0" : "=r" (val));
53 return val;
54 }
55 #else
/* Fallback for architectures without a supported tick counter: report "no ticks available",
 * which in turn makes time_usec() below return 0. */
static UINT64 ticks_read(void) {
        return 0;
}
59 #endif
60
61 #if defined(__aarch64__)
ticks_freq(void)62 static UINT64 ticks_freq(void) {
63 UINT64 freq;
64 __asm__ volatile ("mrs %0, cntfrq_el0": "=r" (freq));
65 return freq;
66 }
67 #else
68 /* count TSC ticks during a millisecond delay */
ticks_freq(void)69 static UINT64 ticks_freq(void) {
70 UINT64 ticks_start, ticks_end;
71 static UINT64 cache = 0;
72
73 if (cache != 0)
74 return cache;
75
76 ticks_start = ticks_read();
77 BS->Stall(1000);
78 ticks_end = ticks_read();
79
80 if (ticks_end < ticks_start) /* Check for an overflow (which is not that unlikely, given on some
81 * archs the value is 32bit) */
82 return 0;
83
84 cache = (ticks_end - ticks_start) * 1000UL;
85 return cache;
86 }
87 #endif
88
time_usec(void)89 UINT64 time_usec(void) {
90 UINT64 ticks, freq;
91
92 ticks = ticks_read();
93 if (ticks == 0)
94 return 0;
95
96 freq = ticks_freq();
97 if (freq == 0)
98 return 0;
99
100 return 1000UL * 1000UL * ticks / freq;
101 }
102