// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables kernel and guest-mode vCPU access to guest physical
 * memory with suitable invalidation mechanisms.
 *
 * Copyright © 2021 Amazon.com, Inc. or its affiliates.
 *
 * Authors:
 *   David Woodhouse <dwmw2@infradead.org>
 */

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/errno.h>

#include "kvm_mm.h"

/*
 * MMU notifier 'invalidate_range_start' hook: invalidate any caches whose
 * userspace HVA falls within the range, and force vCPUs out of guest mode
 * if a guest could be using an affected pfn.
 */
void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm, unsigned long start,
				       unsigned long end, bool may_block)
{
	DECLARE_BITMAP(vcpu_bitmap, KVM_MAX_VCPUS);
	struct gfn_to_pfn_cache *gpc;
	bool evict_vcpus = false;

	spin_lock(&kvm->gpc_lock);
	list_for_each_entry(gpc, &kvm->gpc_list, list) {
		write_lock_irq(&gpc->lock);

		/* Only a single page, so no need to care about length */
		if (gpc->valid && !is_error_noslot_pfn(gpc->pfn) &&
		    gpc->uhva >= start && gpc->uhva < end) {
			gpc->valid = false;

			/*
			 * If a guest vCPU could be using the physical address,
			 * it needs to be forced out of guest mode.
			 */
			if (gpc->usage & KVM_GUEST_USES_PFN) {
				if (!evict_vcpus) {
					evict_vcpus = true;
					bitmap_zero(vcpu_bitmap, KVM_MAX_VCPUS);
				}
				__set_bit(gpc->vcpu->vcpu_idx, vcpu_bitmap);
			}
		}
		write_unlock_irq(&gpc->lock);
	}
	spin_unlock(&kvm->gpc_lock);

	if (evict_vcpus) {
		/*
		 * KVM needs to ensure the vCPU is fully out of guest context
		 * before allowing the invalidation to continue.
		 */
		unsigned int req = KVM_REQ_OUTSIDE_GUEST_MODE;
		bool called;

		/*
		 * If the OOM reaper is active, then all vCPUs should have
		 * been stopped already, so perform the request without
		 * KVM_REQUEST_WAIT and be sad if any needed to be IPI'd.
		 */
		if (!may_block)
			req &= ~KVM_REQUEST_WAIT;

		called = kvm_make_vcpus_request_mask(kvm, req, vcpu_bitmap);

		WARN_ON_ONCE(called && !may_block);
	}
}

bool kvm_gfn_to_pfn_cache_check(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
				gpa_t gpa, unsigned long len)
{
	struct kvm_memslots *slots = kvm_memslots(kvm);

	if (!gpc->active)
		return false;

	if ((gpa & ~PAGE_MASK) + len > PAGE_SIZE)
		return false;

	if (gpc->gpa != gpa || gpc->generation != slots->generation ||
	    kvm_is_error_hva(gpc->uhva))
		return false;

	if (!gpc->valid)
		return false;

	return true;
}
EXPORT_SYMBOL_GPL(kvm_gfn_to_pfn_cache_check);
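
/*
 * Illustrative read-side pattern (a sketch, not code from this file): take
 * gpc->lock for read, validate the cache with kvm_gfn_to_pfn_cache_check(),
 * and on failure drop the lock and refresh before retrying.  The function
 * name read_cached_page and the payload type struct guest_data are
 * hypothetical; only the gpc API calls are real.
 *
 *	static int read_cached_page(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
 *				    gpa_t gpa, struct guest_data *out)
 *	{
 *		for (;;) {
 *			read_lock_irq(&gpc->lock);
 *			if (kvm_gfn_to_pfn_cache_check(kvm, gpc, gpa, sizeof(*out))) {
 *				memcpy(out, gpc->khva, sizeof(*out));
 *				read_unlock_irq(&gpc->lock);
 *				return 0;
 *			}
 *			read_unlock_irq(&gpc->lock);
 *
 *			if (kvm_gfn_to_pfn_cache_refresh(kvm, gpc, gpa, sizeof(*out)))
 *				return -EFAULT;
 *		}
 *	}
 */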

static void gpc_unmap_khva(struct kvm *kvm, kvm_pfn_t pfn, void *khva)
{
	/* Unmap the old pfn/page if it was mapped before. */
	if (!is_error_noslot_pfn(pfn) && khva) {
		if (pfn_valid(pfn))
			kunmap(pfn_to_page(pfn));
#ifdef CONFIG_HAS_IOMEM
		else
			memunmap(khva);
#endif
	}
}

static inline bool mmu_notifier_retry_cache(struct kvm *kvm, unsigned long mmu_seq)
{
	/*
	 * mn_active_invalidate_count acts for all intents and purposes
	 * like mmu_invalidate_in_progress here; but the latter cannot
	 * be used here because the invalidation of caches in the
	 * mmu_notifier event occurs _before_ mmu_invalidate_in_progress
	 * is elevated.
	 *
	 * Note, it does not matter that mn_active_invalidate_count
	 * is not protected by gpc->lock.  It is guaranteed to
	 * be elevated before the mmu_notifier acquires gpc->lock, and
	 * isn't dropped until after mmu_invalidate_seq is updated.
	 */
	if (kvm->mn_active_invalidate_count)
		return true;

	/*
	 * Ensure mn_active_invalidate_count is read before
	 * mmu_invalidate_seq.  This pairs with the smp_wmb() in
	 * mmu_notifier_invalidate_range_end() to guarantee either the
	 * old (non-zero) value of mn_active_invalidate_count or the
	 * new (incremented) value of mmu_invalidate_seq is observed.
	 */
	smp_rmb();
	return kvm->mmu_invalidate_seq != mmu_seq;
}
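
/*
 * A minimal sketch of the pairing described above (simplified; the real
 * writer side lives in the mmu_notifier paths of kvm_main.c):
 *
 *	writer (invalidation):			reader (this helper):
 *	mn_active_invalidate_count++;		if (mn_active_invalidate_count)
 *	<invalidate caches>				return true;	// retry
 *	mmu_invalidate_seq++;			smp_rmb();
 *	smp_wmb();				return mmu_invalidate_seq != mmu_seq;
 *	mn_active_invalidate_count--;
 *
 * Either the reader sees the elevated count, or it sees the incremented
 * sequence; it can never observe the old sequence once the count has
 * dropped back to zero.
 */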

static int hva_to_pfn_retry(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
{
	/* Note, the new page offset may be different from the old! */
	void *old_khva = gpc->khva - offset_in_page(gpc->khva);
	kvm_pfn_t new_pfn = KVM_PFN_ERR_FAULT;
	void *new_khva = NULL;
	unsigned long mmu_seq;

	lockdep_assert_held(&gpc->refresh_lock);

	lockdep_assert_held_write(&gpc->lock);

	/*
	 * Invalidate the cache prior to dropping gpc->lock; the gpa=>uhva
	 * assets have already been updated and so a concurrent check() from a
	 * different task may not fail the gpa/uhva/generation checks.
	 */
	gpc->valid = false;

	do {
		mmu_seq = kvm->mmu_invalidate_seq;
		smp_rmb();

		write_unlock_irq(&gpc->lock);

		/*
		 * If the previous iteration "failed" due to an mmu_notifier
		 * event, release the pfn and unmap the kernel virtual address
		 * from the previous attempt.  Unmapping might sleep, so this
		 * needs to be done after dropping the lock.  Opportunistically
		 * check for resched while the lock isn't held.
		 */
		if (new_pfn != KVM_PFN_ERR_FAULT) {
			/*
			 * Keep the mapping if the previous iteration reused
			 * the existing mapping and didn't create a new one.
			 */
			if (new_khva != old_khva)
				gpc_unmap_khva(kvm, new_pfn, new_khva);

			kvm_release_pfn_clean(new_pfn);

			cond_resched();
		}

		/* We always request a writable mapping */
		new_pfn = hva_to_pfn(gpc->uhva, false, NULL, true, NULL);
		if (is_error_noslot_pfn(new_pfn))
			goto out_error;

		/*
		 * Obtain a new kernel mapping if KVM itself will access the
		 * pfn.  Note, kmap() and memremap() can both sleep, so this
		 * too must be done outside of gpc->lock!
		 */
		if (gpc->usage & KVM_HOST_USES_PFN) {
			if (new_pfn == gpc->pfn) {
				new_khva = old_khva;
			} else if (pfn_valid(new_pfn)) {
				new_khva = kmap(pfn_to_page(new_pfn));
#ifdef CONFIG_HAS_IOMEM
			} else {
				new_khva = memremap(pfn_to_hpa(new_pfn), PAGE_SIZE, MEMREMAP_WB);
#endif
			}
			if (!new_khva) {
				kvm_release_pfn_clean(new_pfn);
				goto out_error;
			}
		}

		write_lock_irq(&gpc->lock);

		/*
		 * Other tasks must wait for _this_ refresh to complete before
		 * attempting to refresh.
		 */
		WARN_ON_ONCE(gpc->valid);
	} while (mmu_notifier_retry_cache(kvm, mmu_seq));

	gpc->valid = true;
	gpc->pfn = new_pfn;
	gpc->khva = new_khva + (gpc->gpa & ~PAGE_MASK);

	/*
	 * Put the reference to the _new_ pfn.  The pfn is now tracked by the
	 * cache and can be safely migrated, swapped, etc... as the cache will
	 * invalidate any mappings in response to relevant mmu_notifier events.
	 */
	kvm_release_pfn_clean(new_pfn);

	return 0;

out_error:
	write_lock_irq(&gpc->lock);

	return -EFAULT;
}

int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
				 gpa_t gpa, unsigned long len)
{
	struct kvm_memslots *slots = kvm_memslots(kvm);
	unsigned long page_offset = gpa & ~PAGE_MASK;
	bool unmap_old = false;
	unsigned long old_uhva;
	kvm_pfn_t old_pfn;
	void *old_khva;
	int ret;

	/*
	 * The region must fit within a single page. The 'len' argument is
	 * only to enforce that.
	 */
	if (page_offset + len > PAGE_SIZE)
		return -EINVAL;

	/*
	 * If another task is refreshing the cache, wait for it to complete.
	 * There is no guarantee that concurrent refreshes will see the same
	 * gpa, memslots generation, etc..., so they must be fully serialized.
	 */
	mutex_lock(&gpc->refresh_lock);

	write_lock_irq(&gpc->lock);

	if (!gpc->active) {
		ret = -EINVAL;
		goto out_unlock;
	}

	old_pfn = gpc->pfn;
	old_khva = gpc->khva - offset_in_page(gpc->khva);
	old_uhva = gpc->uhva;

	/* If the userspace HVA is invalid, refresh that first */
	if (gpc->gpa != gpa || gpc->generation != slots->generation ||
	    kvm_is_error_hva(gpc->uhva)) {
		gfn_t gfn = gpa_to_gfn(gpa);

		gpc->gpa = gpa;
		gpc->generation = slots->generation;
		gpc->memslot = __gfn_to_memslot(slots, gfn);
		gpc->uhva = gfn_to_hva_memslot(gpc->memslot, gfn);

		if (kvm_is_error_hva(gpc->uhva)) {
			ret = -EFAULT;
			goto out;
		}
	}

	/*
	 * If the userspace HVA changed or the PFN was already invalid,
	 * drop the lock and do the HVA to PFN lookup again.
	 */
	if (!gpc->valid || old_uhva != gpc->uhva) {
		ret = hva_to_pfn_retry(kvm, gpc);
	} else {
		/*
		 * If the HVA→PFN mapping was already valid, don't unmap it.
		 * But do update gpc->khva because the offset within the page
		 * may have changed.
		 */
		gpc->khva = old_khva + page_offset;
		old_pfn = KVM_PFN_ERR_FAULT;
		old_khva = NULL;
		ret = 0;
	}

 out:
	/*
	 * Invalidate the cache and purge the pfn/khva if the refresh failed.
	 * Some/all of the uhva, gpa, and memslot generation info may still be
	 * valid; leave it as is.
	 */
	if (ret) {
		gpc->valid = false;
		gpc->pfn = KVM_PFN_ERR_FAULT;
		gpc->khva = NULL;
	}

	/* Detect a pfn change before dropping the lock! */
	unmap_old = (old_pfn != gpc->pfn);

out_unlock:
	write_unlock_irq(&gpc->lock);

	mutex_unlock(&gpc->refresh_lock);

	if (unmap_old)
		gpc_unmap_khva(kvm, old_pfn, old_khva);

	return ret;
}
EXPORT_SYMBOL_GPL(kvm_gfn_to_pfn_cache_refresh);
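
/*
 * Illustrative write-side counterpart to the read pattern sketched above
 * (update_cached_page and struct guest_data are hypothetical; the gpc API
 * calls and mark_page_dirty_in_slot() are real).  Writers follow the same
 * check-or-refresh dance but additionally need dirty logging for the
 * touched gfn:
 *
 *	static int update_cached_page(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
 *				      gpa_t gpa, const struct guest_data *in)
 *	{
 *		for (;;) {
 *			read_lock_irq(&gpc->lock);
 *			if (!kvm_gfn_to_pfn_cache_check(kvm, gpc, gpa, sizeof(*in))) {
 *				read_unlock_irq(&gpc->lock);
 *				if (kvm_gfn_to_pfn_cache_refresh(kvm, gpc, gpa, sizeof(*in)))
 *					return -EFAULT;
 *				continue;
 *			}
 *			memcpy(gpc->khva, in, sizeof(*in));
 *			mark_page_dirty_in_slot(kvm, gpc->memslot, gpa_to_gfn(gpc->gpa));
 *			read_unlock_irq(&gpc->lock);
 *			return 0;
 *		}
 *	}
 */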

void kvm_gfn_to_pfn_cache_unmap(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
{
	void *old_khva;
	kvm_pfn_t old_pfn;

	mutex_lock(&gpc->refresh_lock);
	write_lock_irq(&gpc->lock);

	gpc->valid = false;

	old_khva = gpc->khva - offset_in_page(gpc->khva);
	old_pfn = gpc->pfn;

	/*
	 * We can leave the GPA → uHVA map cache intact but the PFN
	 * lookup will need to be redone even for the same page.
	 */
	gpc->khva = NULL;
	gpc->pfn = KVM_PFN_ERR_FAULT;

	write_unlock_irq(&gpc->lock);
	mutex_unlock(&gpc->refresh_lock);

	gpc_unmap_khva(kvm, old_pfn, old_khva);
}
EXPORT_SYMBOL_GPL(kvm_gfn_to_pfn_cache_unmap);

void kvm_gpc_init(struct gfn_to_pfn_cache *gpc)
{
	rwlock_init(&gpc->lock);
	mutex_init(&gpc->refresh_lock);
}
EXPORT_SYMBOL_GPL(kvm_gpc_init);

int kvm_gpc_activate(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
		     struct kvm_vcpu *vcpu, enum pfn_cache_usage usage,
		     gpa_t gpa, unsigned long len)
{
	WARN_ON_ONCE(!usage || (usage & KVM_GUEST_AND_HOST_USE_PFN) != usage);

	if (!gpc->active) {
		gpc->khva = NULL;
		gpc->pfn = KVM_PFN_ERR_FAULT;
		gpc->uhva = KVM_HVA_ERR_BAD;
		gpc->vcpu = vcpu;
		gpc->usage = usage;
		gpc->valid = false;

		spin_lock(&kvm->gpc_lock);
		list_add(&gpc->list, &kvm->gpc_list);
		spin_unlock(&kvm->gpc_lock);

		/*
		 * Activate the cache after adding it to the list; a concurrent
		 * refresh must not establish a mapping until the cache is
		 * reachable by mmu_notifier events.
		 */
		write_lock_irq(&gpc->lock);
		gpc->active = true;
		write_unlock_irq(&gpc->lock);
	}
	return kvm_gfn_to_pfn_cache_refresh(kvm, gpc, gpa, len);
}
EXPORT_SYMBOL_GPL(kvm_gpc_activate);

void kvm_gpc_deactivate(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
{
	if (gpc->active) {
		/*
		 * Deactivate the cache before removing it from the list; KVM
		 * must stall mmu_notifier events until all users go away, i.e.
		 * until gpc->lock is dropped and refresh is guaranteed to fail.
		 */
		write_lock_irq(&gpc->lock);
		gpc->active = false;
		write_unlock_irq(&gpc->lock);

		spin_lock(&kvm->gpc_lock);
		list_del(&gpc->list);
		spin_unlock(&kvm->gpc_lock);

		kvm_gfn_to_pfn_cache_unmap(kvm, gpc);
	}
}
EXPORT_SYMBOL_GPL(kvm_gpc_deactivate);
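
/*
 * A minimal lifecycle sketch tying the API together (hedged: the cache field
 * is hypothetical, and gpa is assumed to be page-aligned so that a
 * PAGE_SIZE-length region fits in one page):
 *
 *	struct gfn_to_pfn_cache *gpc = &kvm->arch.example_cache;  // hypothetical
 *
 *	kvm_gpc_init(gpc);
 *
 *	// Map one page of guest memory for host-side access; no vCPU owner.
 *	if (kvm_gpc_activate(kvm, gpc, NULL, KVM_HOST_USES_PFN, gpa, PAGE_SIZE))
 *		return -EFAULT;
 *
 *	// ... access via gpc->khva under read_lock + check, as sketched above ...
 *
 *	kvm_gpc_deactivate(kvm, gpc);
 */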