// SPDX-License-Identifier: GPL-2.0-only

#include <linux/efi.h>
#include <linux/memblock.h>
#include <linux/spinlock.h>
#include <asm/unaccepted_memory.h>

/* Protects unaccepted memory bitmap and accepting_list */
static DEFINE_SPINLOCK(unaccepted_memory_lock);

struct accept_range {
	struct list_head list;
	/* In-flight range, as bit offsets into the unaccepted bitmap */
	unsigned long start;
	unsigned long end;
};

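/* In-flight ranges, used to serialize overlapping accept requests */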
static LIST_HEAD(accepting_list);

/*
 * accept_memory() -- Consult bitmap and accept the memory if needed.
 *
 * Only memory that is explicitly marked as unaccepted in the bitmap requires
 * an action. All the remaining memory is implicitly accepted and doesn't need
 * acceptance.
 *
 * No need to accept:
 *  - anything if the system has no unaccepted table;
 *  - memory that is below phys_base;
 *  - memory that is above the range addressable by the bitmap;
 */
void accept_memory(phys_addr_t start, phys_addr_t end)
{
	struct efi_unaccepted_memory *unaccepted;
	unsigned long range_start, range_end;
	struct accept_range range, *entry;
	unsigned long flags;
	u64 unit_size;

	unaccepted = efi_get_unaccepted_table();
	if (!unaccepted)
		return;

	unit_size = unaccepted->unit_size;

	/*
	 * Only care for the part of the range that is represented
	 * in the bitmap.
	 */
	if (start < unaccepted->phys_base)
		start = unaccepted->phys_base;
	if (end < unaccepted->phys_base)
		return;

	/* Translate to offsets from the beginning of the bitmap */
	start -= unaccepted->phys_base;
	end -= unaccepted->phys_base;
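
	/*
	 * For example, with phys_base at 4GB, a request for [2GB, 5GB) has
	 * just been clipped to [4GB, 5GB) and translated to the
	 * bitmap-relative byte range [0, 1GB).
	 */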

	/*
	 * load_unaligned_zeropad() can lead to unwanted loads across page
	 * boundaries. The unwanted loads are typically harmless. But, they
	 * might reach into totally unrelated or even unmapped memory.
	 * load_unaligned_zeropad() relies on exception fixup (#PF, #GP and now
	 * #VE) to recover from these unwanted loads.
	 *
	 * But, this approach does not work for unaccepted memory. For TDX, a
	 * load from unaccepted memory will not lead to a recoverable exception
	 * within the guest. The guest will exit to the VMM where the only
	 * recourse is to terminate the guest.
	 *
	 * There are two parts to fix this issue and comprehensively avoid
	 * access to unaccepted memory. Together these ensure that an extra
	 * "guard" page is accepted in addition to the memory that needs to be
	 * used:
	 *
	 * 1. Implicitly extend the range_contains_unaccepted_memory(start, end)
	 *    checks up to end+unit_size if 'end' is aligned on a unit_size
	 *    boundary.
	 *
	 * 2. Implicitly extend accept_memory(start, end) to end+unit_size if
	 *    'end' is aligned on a unit_size boundary. (immediately following
	 *    this comment)
	 */
	if (!(end % unit_size))
		end += unit_size;
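
	/*
	 * For example, with a 2MB unit_size, an 'end' at 6MB has just been
	 * extended to 8MB, so the 2MB "guard" unit right past the range is
	 * accepted as well.
	 */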

	/*
	 * Make sure not to overrun the bitmap: 'size' is the bitmap length
	 * in bytes and each bit covers unit_size bytes.
	 */
	if (end > unaccepted->size * unit_size * BITS_PER_BYTE)
		end = unaccepted->size * unit_size * BITS_PER_BYTE;

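	/*
	 * Convert byte offsets to bitmap-bit indices. For example, with a
	 * 2MB unit_size, offsets [3MB, 7MB) become bits [1, 4): bit 1
	 * covers [2MB, 4MB) and bits 2-3 cover [4MB, 8MB).
	 */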
	range.start = start / unit_size;
	range.end = DIV_ROUND_UP(end, unit_size);
retry:
	spin_lock_irqsave(&unaccepted_memory_lock, flags);

	/*
	 * Check if anybody else is working on accepting the same range of
	 * memory.
	 *
	 * The check is done with unit_size granularity. It is crucial to catch
	 * all accept requests to the same unit_size block, even if they don't
	 * overlap on the physical address level.
	 */
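	/*
	 * For example, requests for [0, 1KB) and [1KB, 2KB) don't overlap
	 * physically, but with a 2MB unit_size both map to bit 0 and must
	 * be serialized here.
	 */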
	list_for_each_entry(entry, &accepting_list, list) {
		if (entry->end <= range.start)
			continue;
		if (entry->start >= range.end)
			continue;

		/*
		 * Somebody else is accepting the range, or at least part
		 * of it.
		 *
		 * Drop the lock and retry until it is complete.
		 */
		spin_unlock_irqrestore(&unaccepted_memory_lock, flags);
		goto retry;
	}

	/*
	 * Register that the range is about to be accepted.
	 * Make sure nobody else will accept it.
	 */
	list_add(&range.list, &accepting_list);

	range_start = range.start;
	for_each_set_bitrange_from(range_start, range_end, unaccepted->bitmap,
				   range.end) {
		unsigned long phys_start, phys_end;
		unsigned long len = range_end - range_start;

		phys_start = range_start * unit_size + unaccepted->phys_base;
		phys_end = range_end * unit_size + unaccepted->phys_base;

		/*
		 * Keep interrupts disabled until the accept operation is
		 * complete in order to prevent deadlocks.
		 *
		 * Enabling interrupts before calling arch_accept_memory()
		 * creates an opportunity for an interrupt handler to request
		 * acceptance for the same memory. The handler will continuously
		 * spin with interrupts disabled, preventing other tasks from
		 * making progress with the acceptance process.
		 */
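		/* Drop only the lock; interrupts stay disabled (see above) */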
		spin_unlock(&unaccepted_memory_lock);

		arch_accept_memory(phys_start, phys_end);

		spin_lock(&unaccepted_memory_lock);
		bitmap_clear(unaccepted->bitmap, range_start, len);
	}

	list_del(&range.list);
	spin_unlock_irqrestore(&unaccepted_memory_lock, flags);
}
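
/*
 * Illustrative only (hypothetical caller, not taken from this file): a
 * consumer about to touch a freshly allocated physical range would accept
 * it up front, e.g.:
 *
 *	accept_memory(phys, phys + size);
 *
 * The call degrades to a no-op when no unaccepted memory table is present.
 */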

bool range_contains_unaccepted_memory(phys_addr_t start, phys_addr_t end)
{
	struct efi_unaccepted_memory *unaccepted;
	unsigned long flags;
	bool ret = false;
	u64 unit_size;

	unaccepted = efi_get_unaccepted_table();
	if (!unaccepted)
		return false;

	unit_size = unaccepted->unit_size;

	/*
	 * Only care for the part of the range that is represented
	 * in the bitmap.
	 */
	if (start < unaccepted->phys_base)
		start = unaccepted->phys_base;
	if (end < unaccepted->phys_base)
		return false;

	/* Translate to offsets from the beginning of the bitmap */
	start -= unaccepted->phys_base;
	end -= unaccepted->phys_base;

	/*
	 * Also consider the unaccepted state of the *next* page. See fix #1 in
	 * the comment on load_unaligned_zeropad() in accept_memory().
	 */
	if (!(end % unit_size))
		end += unit_size;

	/*
	 * Make sure not to overrun the bitmap: 'size' is the bitmap length
	 * in bytes and each bit covers unit_size bytes.
	 */
	if (end > unaccepted->size * unit_size * BITS_PER_BYTE)
		end = unaccepted->size * unit_size * BITS_PER_BYTE;

	spin_lock_irqsave(&unaccepted_memory_lock, flags);
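	/* Walk the range one unit at a time and test each unit's bit */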
	while (start < end) {
		if (test_bit(start / unit_size, unaccepted->bitmap)) {
			ret = true;
			break;
		}

		start += unit_size;
	}
	spin_unlock_irqrestore(&unaccepted_memory_lock, flags);

	return ret;
}
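
/*
 * Illustrative sketch (hypothetical caller): code worried about
 * load_unaligned_zeropad() overshooting a buffer could check and accept on
 * demand, e.g.:
 *
 *	if (range_contains_unaccepted_memory(phys, phys + len))
 *		accept_memory(phys, phys + len);
 *
 * Because both helpers extend a unit_size-aligned 'end' by one unit, this
 * also covers the "guard" unit just past the range.
 */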