1 /*
2 * GTT virtualization
3 *
4 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
23 * SOFTWARE.
24 *
25 * Authors:
26 * Zhi Wang <zhi.a.wang@intel.com>
27 * Zhenyu Wang <zhenyuw@linux.intel.com>
28 * Xiao Zheng <xiao.zheng@intel.com>
29 *
30 * Contributors:
31 * Min He <min.he@intel.com>
32 * Bing Niu <bing.niu@intel.com>
33 *
34 */
35
36 #include "i915_drv.h"
37 #include "gvt.h"
38 #include "i915_pvinfo.h"
39 #include "trace.h"
40
41 #include "gt/intel_gt_regs.h"
42
43 #if defined(VERBOSE_DEBUG)
44 #define gvt_vdbg_mm(fmt, args...) gvt_dbg_mm(fmt, ##args)
45 #else
46 #define gvt_vdbg_mm(fmt, args...)
47 #endif
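/*
 * The gvt_vdbg_mm() tracing below is compiled out unless VERBOSE_DEBUG is
 * defined at build time, for example (assuming the standard kbuild setup)
 * via "ccflags-y += -DVERBOSE_DEBUG" in the GVT Makefile.
 */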
48
49 static bool enable_out_of_sync = false;
50 static int preallocated_oos_pages = 8192;
51
52 static bool intel_gvt_is_valid_gfn(struct intel_vgpu *vgpu, unsigned long gfn)
53 {
54 struct kvm *kvm = vgpu->vfio_device.kvm;
55 int idx;
56 bool ret;
57
58 if (!vgpu->attached)
59 return false;
60
61 idx = srcu_read_lock(&kvm->srcu);
62 ret = kvm_is_visible_gfn(kvm, gfn);
63 srcu_read_unlock(&kvm->srcu, idx);
64
65 return ret;
66 }
67
68 /*
69 * Validate a GM address and the related range size against the
70 * vGPU's aperture and hidden GM ranges.
71 */
72 bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size)
73 {
74 if (size == 0)
75 return vgpu_gmadr_is_valid(vgpu, addr);
76
77 if (vgpu_gmadr_is_aperture(vgpu, addr) &&
78 vgpu_gmadr_is_aperture(vgpu, addr + size - 1))
79 return true;
80 else if (vgpu_gmadr_is_hidden(vgpu, addr) &&
81 vgpu_gmadr_is_hidden(vgpu, addr + size - 1))
82 return true;
83
84 gvt_dbg_mm("Invalid ggtt range at 0x%llx, size: 0x%x\n",
85 addr, size);
86 return false;
87 }
88
89 /* translate a guest gmadr to host gmadr */
90 int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr)
91 {
92 struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
93
94 if (drm_WARN(&i915->drm, !vgpu_gmadr_is_valid(vgpu, g_addr),
95 "invalid guest gmadr %llx\n", g_addr))
96 return -EACCES;
97
98 if (vgpu_gmadr_is_aperture(vgpu, g_addr))
99 *h_addr = vgpu_aperture_gmadr_base(vgpu)
100 + (g_addr - vgpu_aperture_offset(vgpu));
101 else
102 *h_addr = vgpu_hidden_gmadr_base(vgpu)
103 + (g_addr - vgpu_hidden_offset(vgpu));
104 return 0;
105 }
106
107 /* translate a host gmadr to guest gmadr */
108 int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr)
109 {
110 struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
111
112 if (drm_WARN(&i915->drm, !gvt_gmadr_is_valid(vgpu->gvt, h_addr),
113 "invalid host gmadr %llx\n", h_addr))
114 return -EACCES;
115
116 if (gvt_gmadr_is_aperture(vgpu->gvt, h_addr))
117 *g_addr = vgpu_aperture_gmadr_base(vgpu)
118 + (h_addr - gvt_aperture_gmadr_base(vgpu->gvt));
119 else
120 *g_addr = vgpu_hidden_gmadr_base(vgpu)
121 + (h_addr - gvt_hidden_gmadr_base(vgpu->gvt));
122 return 0;
123 }
124
125 int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
126 unsigned long *h_index)
127 {
128 u64 h_addr;
129 int ret;
130
131 ret = intel_gvt_ggtt_gmadr_g2h(vgpu, g_index << I915_GTT_PAGE_SHIFT,
132 &h_addr);
133 if (ret)
134 return ret;
135
136 *h_index = h_addr >> I915_GTT_PAGE_SHIFT;
137 return 0;
138 }
139
140 int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
141 unsigned long *g_index)
142 {
143 u64 g_addr;
144 int ret;
145
146 ret = intel_gvt_ggtt_gmadr_h2g(vgpu, h_index << I915_GTT_PAGE_SHIFT,
147 &g_addr);
148 if (ret)
149 return ret;
150
151 *g_index = g_addr >> I915_GTT_PAGE_SHIFT;
152 return 0;
153 }
154
155 #define gtt_type_is_entry(type) \
156 (type > GTT_TYPE_INVALID && type < GTT_TYPE_PPGTT_ENTRY \
157 && type != GTT_TYPE_PPGTT_PTE_ENTRY \
158 && type != GTT_TYPE_PPGTT_ROOT_ENTRY)
159
160 #define gtt_type_is_pt(type) \
161 (type >= GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX)
162
163 #define gtt_type_is_pte_pt(type) \
164 (type == GTT_TYPE_PPGTT_PTE_PT)
165
166 #define gtt_type_is_root_pointer(type) \
167 (gtt_type_is_entry(type) && type > GTT_TYPE_PPGTT_ROOT_ENTRY)
168
169 #define gtt_init_entry(e, t, p, v) do { \
170 (e)->type = t; \
171 (e)->pdev = p; \
172 memcpy(&(e)->val64, &v, sizeof(v)); \
173 } while (0)
174
175 /*
176 * Mappings between GTT_TYPE* enumerations.
177 * The following information can be looked up for a given type:
178 * - type of the next level page table
179 * - type of entry inside this level of page table
180 * - type of entry with the PSE bit set
181 *
182 * If the given type doesn't carry that kind of information
183 * (e.g. asking an l4 root entry type for its PSE type, or asking
184 * a PTE page table type for its next level page table type),
185 * then GTT_TYPE_INVALID is returned, because an l4 root entry
186 * doesn't have a PSE bit and a PTE page table doesn't have a
187 * next level page table type. This is useful when traversing a
188 * page table.
189 */
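/*
 * For example, reading from the table below, walking a 4-level PPGTT gives
 * get_next_pt_type(GTT_TYPE_PPGTT_PML4_PT) == GTT_TYPE_PPGTT_PDP_PT, then
 * GTT_TYPE_PPGTT_PDE_PT, then GTT_TYPE_PPGTT_PTE_PT, whose next level is
 * GTT_TYPE_INVALID, which terminates the walk.
 */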
190
191 struct gtt_type_table_entry {
192 int entry_type;
193 int pt_type;
194 int next_pt_type;
195 int pse_entry_type;
196 };
197
198 #define GTT_TYPE_TABLE_ENTRY(type, e_type, cpt_type, npt_type, pse_type) \
199 [type] = { \
200 .entry_type = e_type, \
201 .pt_type = cpt_type, \
202 .next_pt_type = npt_type, \
203 .pse_entry_type = pse_type, \
204 }
205
206 static const struct gtt_type_table_entry gtt_type_table[] = {
207 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
208 GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
209 GTT_TYPE_INVALID,
210 GTT_TYPE_PPGTT_PML4_PT,
211 GTT_TYPE_INVALID),
212 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PML4_PT,
213 GTT_TYPE_PPGTT_PML4_ENTRY,
214 GTT_TYPE_PPGTT_PML4_PT,
215 GTT_TYPE_PPGTT_PDP_PT,
216 GTT_TYPE_INVALID),
217 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PML4_ENTRY,
218 GTT_TYPE_PPGTT_PML4_ENTRY,
219 GTT_TYPE_PPGTT_PML4_PT,
220 GTT_TYPE_PPGTT_PDP_PT,
221 GTT_TYPE_INVALID),
222 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDP_PT,
223 GTT_TYPE_PPGTT_PDP_ENTRY,
224 GTT_TYPE_PPGTT_PDP_PT,
225 GTT_TYPE_PPGTT_PDE_PT,
226 GTT_TYPE_PPGTT_PTE_1G_ENTRY),
227 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
228 GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
229 GTT_TYPE_INVALID,
230 GTT_TYPE_PPGTT_PDE_PT,
231 GTT_TYPE_PPGTT_PTE_1G_ENTRY),
232 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDP_ENTRY,
233 GTT_TYPE_PPGTT_PDP_ENTRY,
234 GTT_TYPE_PPGTT_PDP_PT,
235 GTT_TYPE_PPGTT_PDE_PT,
236 GTT_TYPE_PPGTT_PTE_1G_ENTRY),
237 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDE_PT,
238 GTT_TYPE_PPGTT_PDE_ENTRY,
239 GTT_TYPE_PPGTT_PDE_PT,
240 GTT_TYPE_PPGTT_PTE_PT,
241 GTT_TYPE_PPGTT_PTE_2M_ENTRY),
242 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDE_ENTRY,
243 GTT_TYPE_PPGTT_PDE_ENTRY,
244 GTT_TYPE_PPGTT_PDE_PT,
245 GTT_TYPE_PPGTT_PTE_PT,
246 GTT_TYPE_PPGTT_PTE_2M_ENTRY),
247 /* We take IPS bit as 'PSE' for PTE level. */
248 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_PT,
249 GTT_TYPE_PPGTT_PTE_4K_ENTRY,
250 GTT_TYPE_PPGTT_PTE_PT,
251 GTT_TYPE_INVALID,
252 GTT_TYPE_PPGTT_PTE_64K_ENTRY),
253 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_4K_ENTRY,
254 GTT_TYPE_PPGTT_PTE_4K_ENTRY,
255 GTT_TYPE_PPGTT_PTE_PT,
256 GTT_TYPE_INVALID,
257 GTT_TYPE_PPGTT_PTE_64K_ENTRY),
258 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_64K_ENTRY,
259 GTT_TYPE_PPGTT_PTE_4K_ENTRY,
260 GTT_TYPE_PPGTT_PTE_PT,
261 GTT_TYPE_INVALID,
262 GTT_TYPE_PPGTT_PTE_64K_ENTRY),
263 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_2M_ENTRY,
264 GTT_TYPE_PPGTT_PDE_ENTRY,
265 GTT_TYPE_PPGTT_PDE_PT,
266 GTT_TYPE_INVALID,
267 GTT_TYPE_PPGTT_PTE_2M_ENTRY),
268 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_1G_ENTRY,
269 GTT_TYPE_PPGTT_PDP_ENTRY,
270 GTT_TYPE_PPGTT_PDP_PT,
271 GTT_TYPE_INVALID,
272 GTT_TYPE_PPGTT_PTE_1G_ENTRY),
273 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_GGTT_PTE,
274 GTT_TYPE_GGTT_PTE,
275 GTT_TYPE_INVALID,
276 GTT_TYPE_INVALID,
277 GTT_TYPE_INVALID),
278 };
279
280 static inline int get_next_pt_type(int type)
281 {
282 return gtt_type_table[type].next_pt_type;
283 }
284
285 static inline int get_pt_type(int type)
286 {
287 return gtt_type_table[type].pt_type;
288 }
289
290 static inline int get_entry_type(int type)
291 {
292 return gtt_type_table[type].entry_type;
293 }
294
295 static inline int get_pse_type(int type)
296 {
297 return gtt_type_table[type].pse_entry_type;
298 }
299
300 static u64 read_pte64(struct i915_ggtt *ggtt, unsigned long index)
301 {
302 void __iomem *addr = (gen8_pte_t __iomem *)ggtt->gsm + index;
303
304 return readq(addr);
305 }
306
307 static void ggtt_invalidate(struct intel_gt *gt)
308 {
309 mmio_hw_access_pre(gt);
310 intel_uncore_write(gt->uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
311 mmio_hw_access_post(gt);
312 }
313
314 static void write_pte64(struct i915_ggtt *ggtt, unsigned long index, u64 pte)
315 {
316 void __iomem *addr = (gen8_pte_t __iomem *)ggtt->gsm + index;
317
318 writeq(pte, addr);
319 }
320
321 static inline int gtt_get_entry64(void *pt,
322 struct intel_gvt_gtt_entry *e,
323 unsigned long index, bool hypervisor_access, unsigned long gpa,
324 struct intel_vgpu *vgpu)
325 {
326 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
327 int ret;
328
329 if (WARN_ON(info->gtt_entry_size != 8))
330 return -EINVAL;
331
332 if (hypervisor_access) {
333 ret = intel_gvt_read_gpa(vgpu, gpa +
334 (index << info->gtt_entry_size_shift),
335 &e->val64, 8);
336 if (WARN_ON(ret))
337 return ret;
338 } else if (!pt) {
339 e->val64 = read_pte64(vgpu->gvt->gt->ggtt, index);
340 } else {
341 e->val64 = *((u64 *)pt + index);
342 }
343 return 0;
344 }
345
346 static inline int gtt_set_entry64(void *pt,
347 struct intel_gvt_gtt_entry *e,
348 unsigned long index, bool hypervisor_access, unsigned long gpa,
349 struct intel_vgpu *vgpu)
350 {
351 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
352 int ret;
353
354 if (WARN_ON(info->gtt_entry_size != 8))
355 return -EINVAL;
356
357 if (hypervisor_access) {
358 ret = intel_gvt_write_gpa(vgpu, gpa +
359 (index << info->gtt_entry_size_shift),
360 &e->val64, 8);
361 if (WARN_ON(ret))
362 return ret;
363 } else if (!pt) {
364 write_pte64(vgpu->gvt->gt->ggtt, index, e->val64);
365 } else {
366 *((u64 *)pt + index) = e->val64;
367 }
368 return 0;
369 }
370
371 #define GTT_HAW 46
372
373 #define ADDR_1G_MASK GENMASK_ULL(GTT_HAW - 1, 30)
374 #define ADDR_2M_MASK GENMASK_ULL(GTT_HAW - 1, 21)
375 #define ADDR_64K_MASK GENMASK_ULL(GTT_HAW - 1, 16)
376 #define ADDR_4K_MASK GENMASK_ULL(GTT_HAW - 1, 12)
377
378 #define GTT_SPTE_FLAG_MASK GENMASK_ULL(62, 52)
379 #define GTT_SPTE_FLAG_64K_SPLITED BIT(52) /* split 64K gtt entry */
380
381 #define GTT_64K_PTE_STRIDE 16
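/*
 * A worked example, illustrative only: with GTT_HAW == 46, ADDR_4K_MASK
 * covers address bits 45:12, so a PTE value of 0x123456003 yields a 4K
 * pfn of (0x123456003 & ADDR_4K_MASK) >> PAGE_SHIFT == 0x123456; the
 * 64K/2M/1G masks work the same way but start at bit 16/21/30.
 * GTT_64K_PTE_STRIDE is 16 because one 64K page spans sixteen 4K PTE slots.
 */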
382
383 static unsigned long gen8_gtt_get_pfn(struct intel_gvt_gtt_entry *e)
384 {
385 unsigned long pfn;
386
387 if (e->type == GTT_TYPE_PPGTT_PTE_1G_ENTRY)
388 pfn = (e->val64 & ADDR_1G_MASK) >> PAGE_SHIFT;
389 else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY)
390 pfn = (e->val64 & ADDR_2M_MASK) >> PAGE_SHIFT;
391 else if (e->type == GTT_TYPE_PPGTT_PTE_64K_ENTRY)
392 pfn = (e->val64 & ADDR_64K_MASK) >> PAGE_SHIFT;
393 else
394 pfn = (e->val64 & ADDR_4K_MASK) >> PAGE_SHIFT;
395 return pfn;
396 }
397
398 static void gen8_gtt_set_pfn(struct intel_gvt_gtt_entry *e, unsigned long pfn)
399 {
400 if (e->type == GTT_TYPE_PPGTT_PTE_1G_ENTRY) {
401 e->val64 &= ~ADDR_1G_MASK;
402 pfn &= (ADDR_1G_MASK >> PAGE_SHIFT);
403 } else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY) {
404 e->val64 &= ~ADDR_2M_MASK;
405 pfn &= (ADDR_2M_MASK >> PAGE_SHIFT);
406 } else if (e->type == GTT_TYPE_PPGTT_PTE_64K_ENTRY) {
407 e->val64 &= ~ADDR_64K_MASK;
408 pfn &= (ADDR_64K_MASK >> PAGE_SHIFT);
409 } else {
410 e->val64 &= ~ADDR_4K_MASK;
411 pfn &= (ADDR_4K_MASK >> PAGE_SHIFT);
412 }
413
414 e->val64 |= (pfn << PAGE_SHIFT);
415 }
416
417 static bool gen8_gtt_test_pse(struct intel_gvt_gtt_entry *e)
418 {
419 return !!(e->val64 & _PAGE_PSE);
420 }
421
422 static void gen8_gtt_clear_pse(struct intel_gvt_gtt_entry *e)
423 {
424 if (gen8_gtt_test_pse(e)) {
425 switch (e->type) {
426 case GTT_TYPE_PPGTT_PTE_2M_ENTRY:
427 e->val64 &= ~_PAGE_PSE;
428 e->type = GTT_TYPE_PPGTT_PDE_ENTRY;
429 break;
430 case GTT_TYPE_PPGTT_PTE_1G_ENTRY:
431 e->type = GTT_TYPE_PPGTT_PDP_ENTRY;
432 e->val64 &= ~_PAGE_PSE;
433 break;
434 default:
435 WARN_ON(1);
436 }
437 }
438 }
439
440 static bool gen8_gtt_test_ips(struct intel_gvt_gtt_entry *e)
441 {
442 if (GEM_WARN_ON(e->type != GTT_TYPE_PPGTT_PDE_ENTRY))
443 return false;
444
445 return !!(e->val64 & GEN8_PDE_IPS_64K);
446 }
447
448 static void gen8_gtt_clear_ips(struct intel_gvt_gtt_entry *e)
449 {
450 if (GEM_WARN_ON(e->type != GTT_TYPE_PPGTT_PDE_ENTRY))
451 return;
452
453 e->val64 &= ~GEN8_PDE_IPS_64K;
454 }
455
456 static bool gen8_gtt_test_present(struct intel_gvt_gtt_entry *e)
457 {
458 /*
459 * i915 writes the PDP root pointer registers without the present bit
460 * set, and that still works, so root pointer entries need to be
461 * treated specially.
462 */
463 if (e->type == GTT_TYPE_PPGTT_ROOT_L3_ENTRY
464 || e->type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY)
465 return (e->val64 != 0);
466 else
467 return (e->val64 & GEN8_PAGE_PRESENT);
468 }
469
470 static void gtt_entry_clear_present(struct intel_gvt_gtt_entry *e)
471 {
472 e->val64 &= ~GEN8_PAGE_PRESENT;
473 }
474
475 static void gtt_entry_set_present(struct intel_gvt_gtt_entry *e)
476 {
477 e->val64 |= GEN8_PAGE_PRESENT;
478 }
479
480 static bool gen8_gtt_test_64k_splited(struct intel_gvt_gtt_entry *e)
481 {
482 return !!(e->val64 & GTT_SPTE_FLAG_64K_SPLITED);
483 }
484
485 static void gen8_gtt_set_64k_splited(struct intel_gvt_gtt_entry *e)
486 {
487 e->val64 |= GTT_SPTE_FLAG_64K_SPLITED;
488 }
489
490 static void gen8_gtt_clear_64k_splited(struct intel_gvt_gtt_entry *e)
491 {
492 e->val64 &= ~GTT_SPTE_FLAG_64K_SPLITED;
493 }
494
495 /*
496 * Per-platform GMA routines.
497 */
498 static unsigned long gma_to_ggtt_pte_index(unsigned long gma)
499 {
500 unsigned long x = (gma >> I915_GTT_PAGE_SHIFT);
501
502 trace_gma_index(__func__, gma, x);
503 return x;
504 }
505
506 #define DEFINE_PPGTT_GMA_TO_INDEX(prefix, ename, exp) \
507 static unsigned long prefix##_gma_to_##ename##_index(unsigned long gma) \
508 { \
509 unsigned long x = (exp); \
510 trace_gma_index(__func__, gma, x); \
511 return x; \
512 }
513
514 DEFINE_PPGTT_GMA_TO_INDEX(gen8, pte, (gma >> 12 & 0x1ff));
515 DEFINE_PPGTT_GMA_TO_INDEX(gen8, pde, (gma >> 21 & 0x1ff));
516 DEFINE_PPGTT_GMA_TO_INDEX(gen8, l3_pdp, (gma >> 30 & 0x3));
517 DEFINE_PPGTT_GMA_TO_INDEX(gen8, l4_pdp, (gma >> 30 & 0x1ff));
518 DEFINE_PPGTT_GMA_TO_INDEX(gen8, pml4, (gma >> 39 & 0x1ff));
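/*
 * Taken together, these helpers decompose a 48-bit graphics memory address:
 * bits 47:39 select the PML4 entry, 38:30 the (l4) PDP entry, 29:21 the PDE
 * entry and 20:12 the PTE entry. As an illustrative example, for
 * gma == 0x123456789000, gen8_gma_to_pde_index() returns 0xb3 and
 * gen8_gma_to_pte_index() returns 0x189.
 */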
519
520 static const struct intel_gvt_gtt_pte_ops gen8_gtt_pte_ops = {
521 .get_entry = gtt_get_entry64,
522 .set_entry = gtt_set_entry64,
523 .clear_present = gtt_entry_clear_present,
524 .set_present = gtt_entry_set_present,
525 .test_present = gen8_gtt_test_present,
526 .test_pse = gen8_gtt_test_pse,
527 .clear_pse = gen8_gtt_clear_pse,
528 .clear_ips = gen8_gtt_clear_ips,
529 .test_ips = gen8_gtt_test_ips,
530 .clear_64k_splited = gen8_gtt_clear_64k_splited,
531 .set_64k_splited = gen8_gtt_set_64k_splited,
532 .test_64k_splited = gen8_gtt_test_64k_splited,
533 .get_pfn = gen8_gtt_get_pfn,
534 .set_pfn = gen8_gtt_set_pfn,
535 };
536
537 static const struct intel_gvt_gtt_gma_ops gen8_gtt_gma_ops = {
538 .gma_to_ggtt_pte_index = gma_to_ggtt_pte_index,
539 .gma_to_pte_index = gen8_gma_to_pte_index,
540 .gma_to_pde_index = gen8_gma_to_pde_index,
541 .gma_to_l3_pdp_index = gen8_gma_to_l3_pdp_index,
542 .gma_to_l4_pdp_index = gen8_gma_to_l4_pdp_index,
543 .gma_to_pml4_index = gen8_gma_to_pml4_index,
544 };
545
546 /* Update entry type per pse and ips bit. */
547 static void update_entry_type_for_real(const struct intel_gvt_gtt_pte_ops *pte_ops,
548 struct intel_gvt_gtt_entry *entry, bool ips)
549 {
550 switch (entry->type) {
551 case GTT_TYPE_PPGTT_PDE_ENTRY:
552 case GTT_TYPE_PPGTT_PDP_ENTRY:
553 if (pte_ops->test_pse(entry))
554 entry->type = get_pse_type(entry->type);
555 break;
556 case GTT_TYPE_PPGTT_PTE_4K_ENTRY:
557 if (ips)
558 entry->type = get_pse_type(entry->type);
559 break;
560 default:
561 GEM_BUG_ON(!gtt_type_is_entry(entry->type));
562 }
563
564 GEM_BUG_ON(entry->type == GTT_TYPE_INVALID);
565 }
566
567 /*
568 * MM helpers.
569 */
570 static void _ppgtt_get_root_entry(struct intel_vgpu_mm *mm,
571 struct intel_gvt_gtt_entry *entry, unsigned long index,
572 bool guest)
573 {
574 const struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
575
576 GEM_BUG_ON(mm->type != INTEL_GVT_MM_PPGTT);
577
578 entry->type = mm->ppgtt_mm.root_entry_type;
579 pte_ops->get_entry(guest ? mm->ppgtt_mm.guest_pdps :
580 mm->ppgtt_mm.shadow_pdps,
581 entry, index, false, 0, mm->vgpu);
582 update_entry_type_for_real(pte_ops, entry, false);
583 }
584
585 static inline void ppgtt_get_guest_root_entry(struct intel_vgpu_mm *mm,
586 struct intel_gvt_gtt_entry *entry, unsigned long index)
587 {
588 _ppgtt_get_root_entry(mm, entry, index, true);
589 }
590
591 static inline void ppgtt_get_shadow_root_entry(struct intel_vgpu_mm *mm,
592 struct intel_gvt_gtt_entry *entry, unsigned long index)
593 {
594 _ppgtt_get_root_entry(mm, entry, index, false);
595 }
596
597 static void _ppgtt_set_root_entry(struct intel_vgpu_mm *mm,
598 struct intel_gvt_gtt_entry *entry, unsigned long index,
599 bool guest)
600 {
601 const struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
602
603 pte_ops->set_entry(guest ? mm->ppgtt_mm.guest_pdps :
604 mm->ppgtt_mm.shadow_pdps,
605 entry, index, false, 0, mm->vgpu);
606 }
607
608 static inline void ppgtt_set_shadow_root_entry(struct intel_vgpu_mm *mm,
609 struct intel_gvt_gtt_entry *entry, unsigned long index)
610 {
611 _ppgtt_set_root_entry(mm, entry, index, false);
612 }
613
614 static void ggtt_get_guest_entry(struct intel_vgpu_mm *mm,
615 struct intel_gvt_gtt_entry *entry, unsigned long index)
616 {
617 const struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
618
619 GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);
620
621 entry->type = GTT_TYPE_GGTT_PTE;
622 pte_ops->get_entry(mm->ggtt_mm.virtual_ggtt, entry, index,
623 false, 0, mm->vgpu);
624 }
625
626 static void ggtt_set_guest_entry(struct intel_vgpu_mm *mm,
627 struct intel_gvt_gtt_entry *entry, unsigned long index)
628 {
629 const struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
630
631 GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);
632
633 pte_ops->set_entry(mm->ggtt_mm.virtual_ggtt, entry, index,
634 false, 0, mm->vgpu);
635 }
636
637 static void ggtt_get_host_entry(struct intel_vgpu_mm *mm,
638 struct intel_gvt_gtt_entry *entry, unsigned long index)
639 {
640 const struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
641
642 GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);
643
644 pte_ops->get_entry(NULL, entry, index, false, 0, mm->vgpu);
645 }
646
647 static void ggtt_set_host_entry(struct intel_vgpu_mm *mm,
648 struct intel_gvt_gtt_entry *entry, unsigned long index)
649 {
650 const struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
651 unsigned long offset = index;
652
653 GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);
654
655 if (vgpu_gmadr_is_aperture(mm->vgpu, index << I915_GTT_PAGE_SHIFT)) {
656 offset -= (vgpu_aperture_gmadr_base(mm->vgpu) >> PAGE_SHIFT);
657 mm->ggtt_mm.host_ggtt_aperture[offset] = entry->val64;
658 } else if (vgpu_gmadr_is_hidden(mm->vgpu, index << I915_GTT_PAGE_SHIFT)) {
659 offset -= (vgpu_hidden_gmadr_base(mm->vgpu) >> PAGE_SHIFT);
660 mm->ggtt_mm.host_ggtt_hidden[offset] = entry->val64;
661 }
662
663 pte_ops->set_entry(NULL, entry, index, false, 0, mm->vgpu);
664 }
665
666 /*
667 * PPGTT shadow page table helpers.
668 */
669 static inline int ppgtt_spt_get_entry(
670 struct intel_vgpu_ppgtt_spt *spt,
671 void *page_table, int type,
672 struct intel_gvt_gtt_entry *e, unsigned long index,
673 bool guest)
674 {
675 struct intel_gvt *gvt = spt->vgpu->gvt;
676 const struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
677 int ret;
678
679 e->type = get_entry_type(type);
680
681 if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n"))
682 return -EINVAL;
683
684 ret = ops->get_entry(page_table, e, index, guest,
685 spt->guest_page.gfn << I915_GTT_PAGE_SHIFT,
686 spt->vgpu);
687 if (ret)
688 return ret;
689
690 update_entry_type_for_real(ops, e, guest ?
691 spt->guest_page.pde_ips : false);
692
693 gvt_vdbg_mm("read ppgtt entry, spt type %d, entry type %d, index %lu, value %llx\n",
694 type, e->type, index, e->val64);
695 return 0;
696 }
697
698 static inline int ppgtt_spt_set_entry(
699 struct intel_vgpu_ppgtt_spt *spt,
700 void *page_table, int type,
701 struct intel_gvt_gtt_entry *e, unsigned long index,
702 bool guest)
703 {
704 struct intel_gvt *gvt = spt->vgpu->gvt;
705 const struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
706
707 if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n"))
708 return -EINVAL;
709
710 gvt_vdbg_mm("set ppgtt entry, spt type %d, entry type %d, index %lu, value %llx\n",
711 type, e->type, index, e->val64);
712
713 return ops->set_entry(page_table, e, index, guest,
714 spt->guest_page.gfn << I915_GTT_PAGE_SHIFT,
715 spt->vgpu);
716 }
717
718 #define ppgtt_get_guest_entry(spt, e, index) \
719 ppgtt_spt_get_entry(spt, NULL, \
720 spt->guest_page.type, e, index, true)
721
722 #define ppgtt_set_guest_entry(spt, e, index) \
723 ppgtt_spt_set_entry(spt, NULL, \
724 spt->guest_page.type, e, index, true)
725
726 #define ppgtt_get_shadow_entry(spt, e, index) \
727 ppgtt_spt_get_entry(spt, spt->shadow_page.vaddr, \
728 spt->shadow_page.type, e, index, false)
729
730 #define ppgtt_set_shadow_entry(spt, e, index) \
731 ppgtt_spt_set_entry(spt, spt->shadow_page.vaddr, \
732 spt->shadow_page.type, e, index, false)
733
734 static void *alloc_spt(gfp_t gfp_mask)
735 {
736 struct intel_vgpu_ppgtt_spt *spt;
737
738 spt = kzalloc(sizeof(*spt), gfp_mask);
739 if (!spt)
740 return NULL;
741
742 spt->shadow_page.page = alloc_page(gfp_mask);
743 if (!spt->shadow_page.page) {
744 kfree(spt);
745 return NULL;
746 }
747 return spt;
748 }
749
750 static void free_spt(struct intel_vgpu_ppgtt_spt *spt)
751 {
752 __free_page(spt->shadow_page.page);
753 kfree(spt);
754 }
755
756 static int detach_oos_page(struct intel_vgpu *vgpu,
757 struct intel_vgpu_oos_page *oos_page);
758
759 static void ppgtt_free_spt(struct intel_vgpu_ppgtt_spt *spt)
760 {
761 struct device *kdev = spt->vgpu->gvt->gt->i915->drm.dev;
762
763 trace_spt_free(spt->vgpu->id, spt, spt->guest_page.type);
764
765 dma_unmap_page(kdev, spt->shadow_page.mfn << I915_GTT_PAGE_SHIFT, 4096,
766 DMA_BIDIRECTIONAL);
767
768 radix_tree_delete(&spt->vgpu->gtt.spt_tree, spt->shadow_page.mfn);
769
770 if (spt->guest_page.gfn) {
771 if (spt->guest_page.oos_page)
772 detach_oos_page(spt->vgpu, spt->guest_page.oos_page);
773
774 intel_vgpu_unregister_page_track(spt->vgpu, spt->guest_page.gfn);
775 }
776
777 list_del_init(&spt->post_shadow_list);
778 free_spt(spt);
779 }
780
781 static void ppgtt_free_all_spt(struct intel_vgpu *vgpu)
782 {
783 struct intel_vgpu_ppgtt_spt *spt, *spn;
784 struct radix_tree_iter iter;
785 LIST_HEAD(all_spt);
786 void __rcu **slot;
787
788 rcu_read_lock();
789 radix_tree_for_each_slot(slot, &vgpu->gtt.spt_tree, &iter, 0) {
790 spt = radix_tree_deref_slot(slot);
791 list_move(&spt->post_shadow_list, &all_spt);
792 }
793 rcu_read_unlock();
794
795 list_for_each_entry_safe(spt, spn, &all_spt, post_shadow_list)
796 ppgtt_free_spt(spt);
797 }
798
799 static int ppgtt_handle_guest_write_page_table_bytes(
800 struct intel_vgpu_ppgtt_spt *spt,
801 u64 pa, void *p_data, int bytes);
802
803 static int ppgtt_write_protection_handler(
804 struct intel_vgpu_page_track *page_track,
805 u64 gpa, void *data, int bytes)
806 {
807 struct intel_vgpu_ppgtt_spt *spt = page_track->priv_data;
808
809 int ret;
810
811 if (bytes != 4 && bytes != 8)
812 return -EINVAL;
813
814 ret = ppgtt_handle_guest_write_page_table_bytes(spt, gpa, data, bytes);
815 if (ret)
816 return ret;
817 return ret;
818 }
819
820 /* Find a spt by guest gfn. */
821 static struct intel_vgpu_ppgtt_spt *intel_vgpu_find_spt_by_gfn(
822 struct intel_vgpu *vgpu, unsigned long gfn)
823 {
824 struct intel_vgpu_page_track *track;
825
826 track = intel_vgpu_find_page_track(vgpu, gfn);
827 if (track && track->handler == ppgtt_write_protection_handler)
828 return track->priv_data;
829
830 return NULL;
831 }
832
833 /* Find the spt by shadow page mfn. */
834 static inline struct intel_vgpu_ppgtt_spt *intel_vgpu_find_spt_by_mfn(
835 struct intel_vgpu *vgpu, unsigned long mfn)
836 {
837 return radix_tree_lookup(&vgpu->gtt.spt_tree, mfn);
838 }
839
840 static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt);
841
842 /* Allocate shadow page table without guest page. */
843 static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt(
844 struct intel_vgpu *vgpu, enum intel_gvt_gtt_type type)
845 {
846 struct device *kdev = vgpu->gvt->gt->i915->drm.dev;
847 struct intel_vgpu_ppgtt_spt *spt = NULL;
848 dma_addr_t daddr;
849 int ret;
850
851 retry:
852 spt = alloc_spt(GFP_KERNEL | __GFP_ZERO);
853 if (!spt) {
854 if (reclaim_one_ppgtt_mm(vgpu->gvt))
855 goto retry;
856
857 gvt_vgpu_err("fail to allocate ppgtt shadow page\n");
858 return ERR_PTR(-ENOMEM);
859 }
860
861 spt->vgpu = vgpu;
862 atomic_set(&spt->refcount, 1);
863 INIT_LIST_HEAD(&spt->post_shadow_list);
864
865 /*
866 * Init shadow_page.
867 */
868 spt->shadow_page.type = type;
869 daddr = dma_map_page(kdev, spt->shadow_page.page,
870 0, 4096, DMA_BIDIRECTIONAL);
871 if (dma_mapping_error(kdev, daddr)) {
872 gvt_vgpu_err("fail to map dma addr\n");
873 ret = -EINVAL;
874 goto err_free_spt;
875 }
876 spt->shadow_page.vaddr = page_address(spt->shadow_page.page);
877 spt->shadow_page.mfn = daddr >> I915_GTT_PAGE_SHIFT;
878
879 ret = radix_tree_insert(&vgpu->gtt.spt_tree, spt->shadow_page.mfn, spt);
880 if (ret)
881 goto err_unmap_dma;
882
883 return spt;
884
885 err_unmap_dma:
886 dma_unmap_page(kdev, daddr, PAGE_SIZE, DMA_BIDIRECTIONAL);
887 err_free_spt:
888 free_spt(spt);
889 return ERR_PTR(ret);
890 }
891
892 /* Allocate shadow page table associated with specific gfn. */
893 static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt_gfn(
894 struct intel_vgpu *vgpu, enum intel_gvt_gtt_type type,
895 unsigned long gfn, bool guest_pde_ips)
896 {
897 struct intel_vgpu_ppgtt_spt *spt;
898 int ret;
899
900 spt = ppgtt_alloc_spt(vgpu, type);
901 if (IS_ERR(spt))
902 return spt;
903
904 /*
905 * Init guest_page.
906 */
907 ret = intel_vgpu_register_page_track(vgpu, gfn,
908 ppgtt_write_protection_handler, spt);
909 if (ret) {
910 ppgtt_free_spt(spt);
911 return ERR_PTR(ret);
912 }
913
914 spt->guest_page.type = type;
915 spt->guest_page.gfn = gfn;
916 spt->guest_page.pde_ips = guest_pde_ips;
917
918 trace_spt_alloc(vgpu->id, spt, type, spt->shadow_page.mfn, gfn);
919
920 return spt;
921 }
922
923 #define pt_entry_size_shift(spt) \
924 ((spt)->vgpu->gvt->device_info.gtt_entry_size_shift)
925
926 #define pt_entries(spt) \
927 (I915_GTT_PAGE_SIZE >> pt_entry_size_shift(spt))
928
929 #define for_each_present_guest_entry(spt, e, i) \
930 for (i = 0; i < pt_entries(spt); \
931 i += spt->guest_page.pde_ips ? GTT_64K_PTE_STRIDE : 1) \
932 if (!ppgtt_get_guest_entry(spt, e, i) && \
933 spt->vgpu->gvt->gtt.pte_ops->test_present(e))
934
935 #define for_each_present_shadow_entry(spt, e, i) \
936 for (i = 0; i < pt_entries(spt); \
937 i += spt->shadow_page.pde_ips ? GTT_64K_PTE_STRIDE : 1) \
938 if (!ppgtt_get_shadow_entry(spt, e, i) && \
939 spt->vgpu->gvt->gtt.pte_ops->test_present(e))
940
941 #define for_each_shadow_entry(spt, e, i) \
942 for (i = 0; i < pt_entries(spt); \
943 i += (spt->shadow_page.pde_ips ? GTT_64K_PTE_STRIDE : 1)) \
944 if (!ppgtt_get_shadow_entry(spt, e, i))
945
946 static inline void ppgtt_get_spt(struct intel_vgpu_ppgtt_spt *spt)
947 {
948 int v = atomic_read(&spt->refcount);
949
950 trace_spt_refcount(spt->vgpu->id, "inc", spt, v, (v + 1));
951 atomic_inc(&spt->refcount);
952 }
953
954 static inline int ppgtt_put_spt(struct intel_vgpu_ppgtt_spt *spt)
955 {
956 int v = atomic_read(&spt->refcount);
957
958 trace_spt_refcount(spt->vgpu->id, "dec", spt, v, (v - 1));
959 return atomic_dec_return(&spt->refcount);
960 }
961
962 static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt);
963
964 static int ppgtt_invalidate_spt_by_shadow_entry(struct intel_vgpu *vgpu,
965 struct intel_gvt_gtt_entry *e)
966 {
967 struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
968 const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
969 struct intel_vgpu_ppgtt_spt *s;
970 enum intel_gvt_gtt_type cur_pt_type;
971
972 GEM_BUG_ON(!gtt_type_is_pt(get_next_pt_type(e->type)));
973
974 if (e->type != GTT_TYPE_PPGTT_ROOT_L3_ENTRY
975 && e->type != GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
976 cur_pt_type = get_next_pt_type(e->type);
977
978 if (!gtt_type_is_pt(cur_pt_type) ||
979 !gtt_type_is_pt(cur_pt_type + 1)) {
980 drm_WARN(&i915->drm, 1,
981 "Invalid page table type, cur_pt_type is: %d\n",
982 cur_pt_type);
983 return -EINVAL;
984 }
985
986 cur_pt_type += 1;
987
988 if (ops->get_pfn(e) ==
989 vgpu->gtt.scratch_pt[cur_pt_type].page_mfn)
990 return 0;
991 }
992 s = intel_vgpu_find_spt_by_mfn(vgpu, ops->get_pfn(e));
993 if (!s) {
994 gvt_vgpu_err("fail to find shadow page: mfn: 0x%lx\n",
995 ops->get_pfn(e));
996 return -ENXIO;
997 }
998 return ppgtt_invalidate_spt(s);
999 }
1000
1001 static inline void ppgtt_invalidate_pte(struct intel_vgpu_ppgtt_spt *spt,
1002 struct intel_gvt_gtt_entry *entry)
1003 {
1004 struct intel_vgpu *vgpu = spt->vgpu;
1005 const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1006 unsigned long pfn;
1007 int type;
1008
1009 pfn = ops->get_pfn(entry);
1010 type = spt->shadow_page.type;
1011
1012 /* Uninitialized spte or unshadowed spte. */
1013 if (!pfn || pfn == vgpu->gtt.scratch_pt[type].page_mfn)
1014 return;
1015
1016 intel_gvt_dma_unmap_guest_page(vgpu, pfn << PAGE_SHIFT);
1017 }
1018
1019 static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt)
1020 {
1021 struct intel_vgpu *vgpu = spt->vgpu;
1022 struct intel_gvt_gtt_entry e;
1023 unsigned long index;
1024 int ret;
1025
1026 trace_spt_change(spt->vgpu->id, "die", spt,
1027 spt->guest_page.gfn, spt->shadow_page.type);
1028
1029 if (ppgtt_put_spt(spt) > 0)
1030 return 0;
1031
1032 for_each_present_shadow_entry(spt, &e, index) {
1033 switch (e.type) {
1034 case GTT_TYPE_PPGTT_PTE_4K_ENTRY:
1035 gvt_vdbg_mm("invalidate 4K entry\n");
1036 ppgtt_invalidate_pte(spt, &e);
1037 break;
1038 case GTT_TYPE_PPGTT_PTE_64K_ENTRY:
1039 /* We don't set up 64K shadow entries so far. */
1040 WARN(1, "suspicious 64K gtt entry\n");
1041 continue;
1042 case GTT_TYPE_PPGTT_PTE_2M_ENTRY:
1043 gvt_vdbg_mm("invalidate 2M entry\n");
1044 continue;
1045 case GTT_TYPE_PPGTT_PTE_1G_ENTRY:
1046 WARN(1, "GVT doesn't support 1GB page\n");
1047 continue;
1048 case GTT_TYPE_PPGTT_PML4_ENTRY:
1049 case GTT_TYPE_PPGTT_PDP_ENTRY:
1050 case GTT_TYPE_PPGTT_PDE_ENTRY:
1051 gvt_vdbg_mm("invalidate PML4/PDP/PDE entry\n");
1052 ret = ppgtt_invalidate_spt_by_shadow_entry(
1053 spt->vgpu, &e);
1054 if (ret)
1055 goto fail;
1056 break;
1057 default:
1058 GEM_BUG_ON(1);
1059 }
1060 }
1061
1062 trace_spt_change(spt->vgpu->id, "release", spt,
1063 spt->guest_page.gfn, spt->shadow_page.type);
1064 ppgtt_free_spt(spt);
1065 return 0;
1066 fail:
1067 gvt_vgpu_err("fail: shadow page %p shadow entry 0x%llx type %d\n",
1068 spt, e.val64, e.type);
1069 return ret;
1070 }
1071
1072 static bool vgpu_ips_enabled(struct intel_vgpu *vgpu)
1073 {
1074 struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
1075
1076 if (GRAPHICS_VER(dev_priv) == 9) {
1077 u32 ips = vgpu_vreg_t(vgpu, GEN8_GAMW_ECO_DEV_RW_IA) &
1078 GAMW_ECO_ENABLE_64K_IPS_FIELD;
1079
1080 return ips == GAMW_ECO_ENABLE_64K_IPS_FIELD;
1081 } else if (GRAPHICS_VER(dev_priv) >= 11) {
1082 /* 64K paging is only controlled by the IPS bit in the PTE now. */
1083 return true;
1084 } else
1085 return false;
1086 }
1087
1088 static int ppgtt_populate_spt(struct intel_vgpu_ppgtt_spt *spt);
1089
1090 static struct intel_vgpu_ppgtt_spt *ppgtt_populate_spt_by_guest_entry(
1091 struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *we)
1092 {
1093 const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1094 struct intel_vgpu_ppgtt_spt *spt = NULL;
1095 bool ips = false;
1096 int ret;
1097
1098 GEM_BUG_ON(!gtt_type_is_pt(get_next_pt_type(we->type)));
1099
1100 if (we->type == GTT_TYPE_PPGTT_PDE_ENTRY)
1101 ips = vgpu_ips_enabled(vgpu) && ops->test_ips(we);
1102
1103 spt = intel_vgpu_find_spt_by_gfn(vgpu, ops->get_pfn(we));
1104 if (spt) {
1105 ppgtt_get_spt(spt);
1106
1107 if (ips != spt->guest_page.pde_ips) {
1108 spt->guest_page.pde_ips = ips;
1109
1110 gvt_dbg_mm("reshadow PDE since ips changed\n");
1111 clear_page(spt->shadow_page.vaddr);
1112 ret = ppgtt_populate_spt(spt);
1113 if (ret) {
1114 ppgtt_put_spt(spt);
1115 goto err;
1116 }
1117 }
1118 } else {
1119 int type = get_next_pt_type(we->type);
1120
1121 if (!gtt_type_is_pt(type)) {
1122 ret = -EINVAL;
1123 goto err;
1124 }
1125
1126 spt = ppgtt_alloc_spt_gfn(vgpu, type, ops->get_pfn(we), ips);
1127 if (IS_ERR(spt)) {
1128 ret = PTR_ERR(spt);
1129 goto err;
1130 }
1131
1132 ret = intel_vgpu_enable_page_track(vgpu, spt->guest_page.gfn);
1133 if (ret)
1134 goto err_free_spt;
1135
1136 ret = ppgtt_populate_spt(spt);
1137 if (ret)
1138 goto err_free_spt;
1139
1140 trace_spt_change(vgpu->id, "new", spt, spt->guest_page.gfn,
1141 spt->shadow_page.type);
1142 }
1143 return spt;
1144
1145 err_free_spt:
1146 ppgtt_free_spt(spt);
1147 spt = NULL;
1148 err:
1149 gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
1150 spt, we->val64, we->type);
1151 return ERR_PTR(ret);
1152 }
1153
1154 static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se,
1155 struct intel_vgpu_ppgtt_spt *s, struct intel_gvt_gtt_entry *ge)
1156 {
1157 const struct intel_gvt_gtt_pte_ops *ops = s->vgpu->gvt->gtt.pte_ops;
1158
1159 se->type = ge->type;
1160 se->val64 = ge->val64;
1161
1162 /* Because we always split 64KB pages, clear IPS in the shadow PDE. */
1163 if (se->type == GTT_TYPE_PPGTT_PDE_ENTRY)
1164 ops->clear_ips(se);
1165
1166 ops->set_pfn(se, s->shadow_page.mfn);
1167 }
1168
1169 /*
1170 * Check whether a 2M page mapping can be used
1171 * @vgpu: target vgpu
1172 * @entry: target pfn's gtt entry
1173 *
1174 * Return 1 if 2MB huge gtt shadowing is possible, 0 if the conditions
1175 * are not met, negative error code on failure.
1176 */
1177 static int is_2MB_gtt_possible(struct intel_vgpu *vgpu,
1178 struct intel_gvt_gtt_entry *entry)
1179 {
1180 const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1181 kvm_pfn_t pfn;
1182
1183 if (!HAS_PAGE_SIZES(vgpu->gvt->gt->i915, I915_GTT_PAGE_SIZE_2M))
1184 return 0;
1185
1186 if (!vgpu->attached)
1187 return -EINVAL;
1188 pfn = gfn_to_pfn(vgpu->vfio_device.kvm, ops->get_pfn(entry));
1189 if (is_error_noslot_pfn(pfn))
1190 return -EINVAL;
1191 return PageTransHuge(pfn_to_page(pfn));
1192 }
1193
1194 static int split_2MB_gtt_entry(struct intel_vgpu *vgpu,
1195 struct intel_vgpu_ppgtt_spt *spt, unsigned long index,
1196 struct intel_gvt_gtt_entry *se)
1197 {
1198 const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1199 struct intel_vgpu_ppgtt_spt *sub_spt;
1200 struct intel_gvt_gtt_entry sub_se;
1201 unsigned long start_gfn;
1202 dma_addr_t dma_addr;
1203 unsigned long sub_index;
1204 int ret;
1205
1206 gvt_dbg_mm("Split 2M gtt entry, index %lu\n", index);
1207
1208 start_gfn = ops->get_pfn(se);
1209
1210 sub_spt = ppgtt_alloc_spt(vgpu, GTT_TYPE_PPGTT_PTE_PT);
1211 if (IS_ERR(sub_spt))
1212 return PTR_ERR(sub_spt);
1213
1214 for_each_shadow_entry(sub_spt, &sub_se, sub_index) {
1215 ret = intel_gvt_dma_map_guest_page(vgpu, start_gfn + sub_index,
1216 PAGE_SIZE, &dma_addr);
1217 if (ret)
1218 goto err;
1219 sub_se.val64 = se->val64;
1220
1221 /* Copy the PAT field from PDE. */
1222 sub_se.val64 &= ~_PAGE_PAT;
1223 sub_se.val64 |= (se->val64 & _PAGE_PAT_LARGE) >> 5;
1224
1225 ops->set_pfn(&sub_se, dma_addr >> PAGE_SHIFT);
1226 ppgtt_set_shadow_entry(sub_spt, &sub_se, sub_index);
1227 }
1228
1229 /* Clear dirty field. */
1230 se->val64 &= ~_PAGE_DIRTY;
1231
1232 ops->clear_pse(se);
1233 ops->clear_ips(se);
1234 ops->set_pfn(se, sub_spt->shadow_page.mfn);
1235 ppgtt_set_shadow_entry(spt, se, index);
1236 return 0;
1237 err:
1238 /* Cancel the existing address mappings of the DMA addr. */
1239 for_each_present_shadow_entry(sub_spt, &sub_se, sub_index) {
1240 gvt_vdbg_mm("invalidate 4K entry\n");
1241 ppgtt_invalidate_pte(sub_spt, &sub_se);
1242 }
1243 /* Release the new allocated spt. */
1244 trace_spt_change(sub_spt->vgpu->id, "release", sub_spt,
1245 sub_spt->guest_page.gfn, sub_spt->shadow_page.type);
1246 ppgtt_free_spt(sub_spt);
1247 return ret;
1248 }
1249
1250 static int split_64KB_gtt_entry(struct intel_vgpu *vgpu,
1251 struct intel_vgpu_ppgtt_spt *spt, unsigned long index,
1252 struct intel_gvt_gtt_entry *se)
1253 {
1254 const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1255 struct intel_gvt_gtt_entry entry = *se;
1256 unsigned long start_gfn;
1257 dma_addr_t dma_addr;
1258 int i, ret;
1259
1260 gvt_vdbg_mm("Split 64K gtt entry, index %lu\n", index);
1261
1262 GEM_BUG_ON(index % GTT_64K_PTE_STRIDE);
1263
1264 start_gfn = ops->get_pfn(se);
1265
1266 entry.type = GTT_TYPE_PPGTT_PTE_4K_ENTRY;
1267 ops->set_64k_splited(&entry);
1268
1269 for (i = 0; i < GTT_64K_PTE_STRIDE; i++) {
1270 ret = intel_gvt_dma_map_guest_page(vgpu, start_gfn + i,
1271 PAGE_SIZE, &dma_addr);
1272 if (ret)
1273 return ret;
1274
1275 ops->set_pfn(&entry, dma_addr >> PAGE_SHIFT);
1276 ppgtt_set_shadow_entry(spt, &entry, index + i);
1277 }
1278 return 0;
1279 }
1280
1281 static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu,
1282 struct intel_vgpu_ppgtt_spt *spt, unsigned long index,
1283 struct intel_gvt_gtt_entry *ge)
1284 {
1285 const struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
1286 struct intel_gvt_gtt_entry se = *ge;
1287 unsigned long gfn, page_size = PAGE_SIZE;
1288 dma_addr_t dma_addr;
1289 int ret;
1290
1291 if (!pte_ops->test_present(ge))
1292 return 0;
1293
1294 gfn = pte_ops->get_pfn(ge);
1295
1296 switch (ge->type) {
1297 case GTT_TYPE_PPGTT_PTE_4K_ENTRY:
1298 gvt_vdbg_mm("shadow 4K gtt entry\n");
1299 break;
1300 case GTT_TYPE_PPGTT_PTE_64K_ENTRY:
1301 gvt_vdbg_mm("shadow 64K gtt entry\n");
1302 /*
1303 * The layout of a 64K page is special: the page size is
1304 * controlled by the upper PDE. To keep it simple, we always split
1305 * a 64K page into smaller 4K pages in the shadow PT.
1306 */
1307 return split_64KB_gtt_entry(vgpu, spt, index, &se);
1308 case GTT_TYPE_PPGTT_PTE_2M_ENTRY:
1309 gvt_vdbg_mm("shadow 2M gtt entry\n");
1310 ret = is_2MB_gtt_possible(vgpu, ge);
1311 if (ret == 0)
1312 return split_2MB_gtt_entry(vgpu, spt, index, &se);
1313 else if (ret < 0)
1314 return ret;
1315 page_size = I915_GTT_PAGE_SIZE_2M;
1316 break;
1317 case GTT_TYPE_PPGTT_PTE_1G_ENTRY:
1318 gvt_vgpu_err("GVT doesn't support 1GB entry\n");
1319 return -EINVAL;
1320 default:
1321 GEM_BUG_ON(1);
1322 }
1323
1324 /* direct shadow */
1325 ret = intel_gvt_dma_map_guest_page(vgpu, gfn, page_size, &dma_addr);
1326 if (ret)
1327 return -ENXIO;
1328
1329 pte_ops->set_pfn(&se, dma_addr >> PAGE_SHIFT);
1330 ppgtt_set_shadow_entry(spt, &se, index);
1331 return 0;
1332 }
1333
1334 static int ppgtt_populate_spt(struct intel_vgpu_ppgtt_spt *spt)
1335 {
1336 struct intel_vgpu *vgpu = spt->vgpu;
1337 struct intel_gvt *gvt = vgpu->gvt;
1338 const struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
1339 struct intel_vgpu_ppgtt_spt *s;
1340 struct intel_gvt_gtt_entry se, ge;
1341 unsigned long gfn, i;
1342 int ret;
1343
1344 trace_spt_change(spt->vgpu->id, "born", spt,
1345 spt->guest_page.gfn, spt->shadow_page.type);
1346
1347 for_each_present_guest_entry(spt, &ge, i) {
1348 if (gtt_type_is_pt(get_next_pt_type(ge.type))) {
1349 s = ppgtt_populate_spt_by_guest_entry(vgpu, &ge);
1350 if (IS_ERR(s)) {
1351 ret = PTR_ERR(s);
1352 goto fail;
1353 }
1354 ppgtt_get_shadow_entry(spt, &se, i);
1355 ppgtt_generate_shadow_entry(&se, s, &ge);
1356 ppgtt_set_shadow_entry(spt, &se, i);
1357 } else {
1358 gfn = ops->get_pfn(&ge);
1359 if (!intel_gvt_is_valid_gfn(vgpu, gfn)) {
1360 ops->set_pfn(&se, gvt->gtt.scratch_mfn);
1361 ppgtt_set_shadow_entry(spt, &se, i);
1362 continue;
1363 }
1364
1365 ret = ppgtt_populate_shadow_entry(vgpu, spt, i, &ge);
1366 if (ret)
1367 goto fail;
1368 }
1369 }
1370 return 0;
1371 fail:
1372 gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
1373 spt, ge.val64, ge.type);
1374 return ret;
1375 }
1376
1377 static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_ppgtt_spt *spt,
1378 struct intel_gvt_gtt_entry *se, unsigned long index)
1379 {
1380 struct intel_vgpu *vgpu = spt->vgpu;
1381 const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1382 int ret;
1383
1384 trace_spt_guest_change(spt->vgpu->id, "remove", spt,
1385 spt->shadow_page.type, se->val64, index);
1386
1387 gvt_vdbg_mm("destroy old shadow entry, type %d, index %lu, value %llx\n",
1388 se->type, index, se->val64);
1389
1390 if (!ops->test_present(se))
1391 return 0;
1392
1393 if (ops->get_pfn(se) ==
1394 vgpu->gtt.scratch_pt[spt->shadow_page.type].page_mfn)
1395 return 0;
1396
1397 if (gtt_type_is_pt(get_next_pt_type(se->type))) {
1398 struct intel_vgpu_ppgtt_spt *s =
1399 intel_vgpu_find_spt_by_mfn(vgpu, ops->get_pfn(se));
1400 if (!s) {
1401 gvt_vgpu_err("fail to find guest page\n");
1402 ret = -ENXIO;
1403 goto fail;
1404 }
1405 ret = ppgtt_invalidate_spt(s);
1406 if (ret)
1407 goto fail;
1408 } else {
1409 /* We don't set up 64K shadow entries so far. */
1410 WARN(se->type == GTT_TYPE_PPGTT_PTE_64K_ENTRY,
1411 "suspicious 64K entry\n");
1412 ppgtt_invalidate_pte(spt, se);
1413 }
1414
1415 return 0;
1416 fail:
1417 gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
1418 spt, se->val64, se->type);
1419 return ret;
1420 }
1421
1422 static int ppgtt_handle_guest_entry_add(struct intel_vgpu_ppgtt_spt *spt,
1423 struct intel_gvt_gtt_entry *we, unsigned long index)
1424 {
1425 struct intel_vgpu *vgpu = spt->vgpu;
1426 struct intel_gvt_gtt_entry m;
1427 struct intel_vgpu_ppgtt_spt *s;
1428 int ret;
1429
1430 trace_spt_guest_change(spt->vgpu->id, "add", spt, spt->shadow_page.type,
1431 we->val64, index);
1432
1433 gvt_vdbg_mm("add shadow entry: type %d, index %lu, value %llx\n",
1434 we->type, index, we->val64);
1435
1436 if (gtt_type_is_pt(get_next_pt_type(we->type))) {
1437 s = ppgtt_populate_spt_by_guest_entry(vgpu, we);
1438 if (IS_ERR(s)) {
1439 ret = PTR_ERR(s);
1440 goto fail;
1441 }
1442 ppgtt_get_shadow_entry(spt, &m, index);
1443 ppgtt_generate_shadow_entry(&m, s, we);
1444 ppgtt_set_shadow_entry(spt, &m, index);
1445 } else {
1446 ret = ppgtt_populate_shadow_entry(vgpu, spt, index, we);
1447 if (ret)
1448 goto fail;
1449 }
1450 return 0;
1451 fail:
1452 gvt_vgpu_err("fail: spt %p guest entry 0x%llx type %d\n",
1453 spt, we->val64, we->type);
1454 return ret;
1455 }
1456
1457 static int sync_oos_page(struct intel_vgpu *vgpu,
1458 struct intel_vgpu_oos_page *oos_page)
1459 {
1460 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
1461 struct intel_gvt *gvt = vgpu->gvt;
1462 const struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
1463 struct intel_vgpu_ppgtt_spt *spt = oos_page->spt;
1464 struct intel_gvt_gtt_entry old, new;
1465 int index;
1466 int ret;
1467
1468 trace_oos_change(vgpu->id, "sync", oos_page->id,
1469 spt, spt->guest_page.type);
1470
1471 old.type = new.type = get_entry_type(spt->guest_page.type);
1472 old.val64 = new.val64 = 0;
1473
1474 for (index = 0; index < (I915_GTT_PAGE_SIZE >>
1475 info->gtt_entry_size_shift); index++) {
1476 ops->get_entry(oos_page->mem, &old, index, false, 0, vgpu);
1477 ops->get_entry(NULL, &new, index, true,
1478 spt->guest_page.gfn << PAGE_SHIFT, vgpu);
1479
1480 if (old.val64 == new.val64
1481 && !test_and_clear_bit(index, spt->post_shadow_bitmap))
1482 continue;
1483
1484 trace_oos_sync(vgpu->id, oos_page->id,
1485 spt, spt->guest_page.type,
1486 new.val64, index);
1487
1488 ret = ppgtt_populate_shadow_entry(vgpu, spt, index, &new);
1489 if (ret)
1490 return ret;
1491
1492 ops->set_entry(oos_page->mem, &new, index, false, 0, vgpu);
1493 }
1494
1495 spt->guest_page.write_cnt = 0;
1496 list_del_init(&spt->post_shadow_list);
1497 return 0;
1498 }
1499
1500 static int detach_oos_page(struct intel_vgpu *vgpu,
1501 struct intel_vgpu_oos_page *oos_page)
1502 {
1503 struct intel_gvt *gvt = vgpu->gvt;
1504 struct intel_vgpu_ppgtt_spt *spt = oos_page->spt;
1505
1506 trace_oos_change(vgpu->id, "detach", oos_page->id,
1507 spt, spt->guest_page.type);
1508
1509 spt->guest_page.write_cnt = 0;
1510 spt->guest_page.oos_page = NULL;
1511 oos_page->spt = NULL;
1512
1513 list_del_init(&oos_page->vm_list);
1514 list_move_tail(&oos_page->list, &gvt->gtt.oos_page_free_list_head);
1515
1516 return 0;
1517 }
1518
1519 static int attach_oos_page(struct intel_vgpu_oos_page *oos_page,
1520 struct intel_vgpu_ppgtt_spt *spt)
1521 {
1522 struct intel_gvt *gvt = spt->vgpu->gvt;
1523 int ret;
1524
1525 ret = intel_gvt_read_gpa(spt->vgpu,
1526 spt->guest_page.gfn << I915_GTT_PAGE_SHIFT,
1527 oos_page->mem, I915_GTT_PAGE_SIZE);
1528 if (ret)
1529 return ret;
1530
1531 oos_page->spt = spt;
1532 spt->guest_page.oos_page = oos_page;
1533
1534 list_move_tail(&oos_page->list, &gvt->gtt.oos_page_use_list_head);
1535
1536 trace_oos_change(spt->vgpu->id, "attach", oos_page->id,
1537 spt, spt->guest_page.type);
1538 return 0;
1539 }
1540
1541 static int ppgtt_set_guest_page_sync(struct intel_vgpu_ppgtt_spt *spt)
1542 {
1543 struct intel_vgpu_oos_page *oos_page = spt->guest_page.oos_page;
1544 int ret;
1545
1546 ret = intel_vgpu_enable_page_track(spt->vgpu, spt->guest_page.gfn);
1547 if (ret)
1548 return ret;
1549
1550 trace_oos_change(spt->vgpu->id, "set page sync", oos_page->id,
1551 spt, spt->guest_page.type);
1552
1553 list_del_init(&oos_page->vm_list);
1554 return sync_oos_page(spt->vgpu, oos_page);
1555 }
1556
1557 static int ppgtt_allocate_oos_page(struct intel_vgpu_ppgtt_spt *spt)
1558 {
1559 struct intel_gvt *gvt = spt->vgpu->gvt;
1560 struct intel_gvt_gtt *gtt = &gvt->gtt;
1561 struct intel_vgpu_oos_page *oos_page = spt->guest_page.oos_page;
1562 int ret;
1563
1564 WARN(oos_page, "shadow PPGTT page already has an oos page\n");
1565
1566 if (list_empty(&gtt->oos_page_free_list_head)) {
1567 oos_page = container_of(gtt->oos_page_use_list_head.next,
1568 struct intel_vgpu_oos_page, list);
1569 ret = ppgtt_set_guest_page_sync(oos_page->spt);
1570 if (ret)
1571 return ret;
1572 ret = detach_oos_page(spt->vgpu, oos_page);
1573 if (ret)
1574 return ret;
1575 } else
1576 oos_page = container_of(gtt->oos_page_free_list_head.next,
1577 struct intel_vgpu_oos_page, list);
1578 return attach_oos_page(oos_page, spt);
1579 }
1580
1581 static int ppgtt_set_guest_page_oos(struct intel_vgpu_ppgtt_spt *spt)
1582 {
1583 struct intel_vgpu_oos_page *oos_page = spt->guest_page.oos_page;
1584
1585 if (WARN(!oos_page, "shadow PPGTT page should have an oos page\n"))
1586 return -EINVAL;
1587
1588 trace_oos_change(spt->vgpu->id, "set page out of sync", oos_page->id,
1589 spt, spt->guest_page.type);
1590
1591 list_add_tail(&oos_page->vm_list, &spt->vgpu->gtt.oos_page_list_head);
1592 return intel_vgpu_disable_page_track(spt->vgpu, spt->guest_page.gfn);
1593 }
1594
1595 /**
1596 * intel_vgpu_sync_oos_pages - sync all the out-of-sync shadow pages for a vGPU
1597 * @vgpu: a vGPU
1598 *
1599 * This function is called before submitting a guest workload to the host,
1600 * to sync all the out-of-sync shadow pages for the vGPU.
1601 *
1602 * Returns:
1603 * Zero on success, negative error code if failed.
1604 */
1605 int intel_vgpu_sync_oos_pages(struct intel_vgpu *vgpu)
1606 {
1607 struct list_head *pos, *n;
1608 struct intel_vgpu_oos_page *oos_page;
1609 int ret;
1610
1611 if (!enable_out_of_sync)
1612 return 0;
1613
1614 list_for_each_safe(pos, n, &vgpu->gtt.oos_page_list_head) {
1615 oos_page = container_of(pos,
1616 struct intel_vgpu_oos_page, vm_list);
1617 ret = ppgtt_set_guest_page_sync(oos_page->spt);
1618 if (ret)
1619 return ret;
1620 }
1621 return 0;
1622 }
1623
1624 /*
1625 * The heart of PPGTT shadow page table.
1626 */
1627 static int ppgtt_handle_guest_write_page_table(
1628 struct intel_vgpu_ppgtt_spt *spt,
1629 struct intel_gvt_gtt_entry *we, unsigned long index)
1630 {
1631 struct intel_vgpu *vgpu = spt->vgpu;
1632 int type = spt->shadow_page.type;
1633 const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1634 struct intel_gvt_gtt_entry old_se;
1635 int new_present;
1636 int i, ret;
1637
1638 new_present = ops->test_present(we);
1639
1640 /*
1641 * Add the new entry first and then remove the old one; this
1642 * guarantees the ppgtt table stays valid during the window between
1643 * addition and removal.
1644 */
1645 ppgtt_get_shadow_entry(spt, &old_se, index);
1646
1647 if (new_present) {
1648 ret = ppgtt_handle_guest_entry_add(spt, we, index);
1649 if (ret)
1650 goto fail;
1651 }
1652
1653 ret = ppgtt_handle_guest_entry_removal(spt, &old_se, index);
1654 if (ret)
1655 goto fail;
1656
1657 if (!new_present) {
1658 /* For split 64KB entries, we need to clear them all. */
1659 if (ops->test_64k_splited(&old_se) &&
1660 !(index % GTT_64K_PTE_STRIDE)) {
1661 gvt_vdbg_mm("remove splited 64K shadow entries\n");
1662 for (i = 0; i < GTT_64K_PTE_STRIDE; i++) {
1663 ops->clear_64k_splited(&old_se);
1664 ops->set_pfn(&old_se,
1665 vgpu->gtt.scratch_pt[type].page_mfn);
1666 ppgtt_set_shadow_entry(spt, &old_se, index + i);
1667 }
1668 } else if (old_se.type == GTT_TYPE_PPGTT_PTE_2M_ENTRY ||
1669 old_se.type == GTT_TYPE_PPGTT_PTE_1G_ENTRY) {
1670 ops->clear_pse(&old_se);
1671 ops->set_pfn(&old_se,
1672 vgpu->gtt.scratch_pt[type].page_mfn);
1673 ppgtt_set_shadow_entry(spt, &old_se, index);
1674 } else {
1675 ops->set_pfn(&old_se,
1676 vgpu->gtt.scratch_pt[type].page_mfn);
1677 ppgtt_set_shadow_entry(spt, &old_se, index);
1678 }
1679 }
1680
1681 return 0;
1682 fail:
1683 gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d.\n",
1684 spt, we->val64, we->type);
1685 return ret;
1686 }
1687
1688
1689
1690 static inline bool can_do_out_of_sync(struct intel_vgpu_ppgtt_spt *spt)
1691 {
1692 return enable_out_of_sync
1693 && gtt_type_is_pte_pt(spt->guest_page.type)
1694 && spt->guest_page.write_cnt >= 2;
1695 }
1696
1697 static void ppgtt_set_post_shadow(struct intel_vgpu_ppgtt_spt *spt,
1698 unsigned long index)
1699 {
1700 set_bit(index, spt->post_shadow_bitmap);
1701 if (!list_empty(&spt->post_shadow_list))
1702 return;
1703
1704 list_add_tail(&spt->post_shadow_list,
1705 &spt->vgpu->gtt.post_shadow_list_head);
1706 }
1707
1708 /**
1709 * intel_vgpu_flush_post_shadow - flush the post shadow transactions
1710 * @vgpu: a vGPU
1711 *
1712 * This function is called before submitting a guest workload to host,
1713 * to flush all the post shadows for a vGPU.
1714 *
1715 * Returns:
1716 * Zero on success, negative error code if failed.
1717 */
1718 int intel_vgpu_flush_post_shadow(struct intel_vgpu *vgpu)
1719 {
1720 struct list_head *pos, *n;
1721 struct intel_vgpu_ppgtt_spt *spt;
1722 struct intel_gvt_gtt_entry ge;
1723 unsigned long index;
1724 int ret;
1725
1726 list_for_each_safe(pos, n, &vgpu->gtt.post_shadow_list_head) {
1727 spt = container_of(pos, struct intel_vgpu_ppgtt_spt,
1728 post_shadow_list);
1729
1730 for_each_set_bit(index, spt->post_shadow_bitmap,
1731 GTT_ENTRY_NUM_IN_ONE_PAGE) {
1732 ppgtt_get_guest_entry(spt, &ge, index);
1733
1734 ret = ppgtt_handle_guest_write_page_table(spt,
1735 &ge, index);
1736 if (ret)
1737 return ret;
1738 clear_bit(index, spt->post_shadow_bitmap);
1739 }
1740 list_del_init(&spt->post_shadow_list);
1741 }
1742 return 0;
1743 }
1744
1745 static int ppgtt_handle_guest_write_page_table_bytes(
1746 struct intel_vgpu_ppgtt_spt *spt,
1747 u64 pa, void *p_data, int bytes)
1748 {
1749 struct intel_vgpu *vgpu = spt->vgpu;
1750 const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1751 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
1752 struct intel_gvt_gtt_entry we, se;
1753 unsigned long index;
1754 int ret;
1755
1756 index = (pa & (PAGE_SIZE - 1)) >> info->gtt_entry_size_shift;
1757
1758 ppgtt_get_guest_entry(spt, &we, index);
1759
1760 /*
1761 	 * For a page table that holds 64K GTT entries, only PTE#0, PTE#16,
1762 	 * PTE#32, ... PTE#496 are used. Updates to the unused PTEs should be
1763 	 * ignored.
1764 */
1765 if (we.type == GTT_TYPE_PPGTT_PTE_64K_ENTRY &&
1766 (index % GTT_64K_PTE_STRIDE)) {
1767 gvt_vdbg_mm("Ignore write to unused PTE entry, index %lu\n",
1768 index);
1769 return 0;
1770 }
1771
1772 if (bytes == info->gtt_entry_size) {
1773 ret = ppgtt_handle_guest_write_page_table(spt, &we, index);
1774 if (ret)
1775 return ret;
1776 } else {
1777 if (!test_bit(index, spt->post_shadow_bitmap)) {
1778 int type = spt->shadow_page.type;
1779
1780 ppgtt_get_shadow_entry(spt, &se, index);
1781 ret = ppgtt_handle_guest_entry_removal(spt, &se, index);
1782 if (ret)
1783 return ret;
1784 ops->set_pfn(&se, vgpu->gtt.scratch_pt[type].page_mfn);
1785 ppgtt_set_shadow_entry(spt, &se, index);
1786 }
1787 ppgtt_set_post_shadow(spt, index);
1788 }
1789
1790 if (!enable_out_of_sync)
1791 return 0;
1792
1793 spt->guest_page.write_cnt++;
1794
1795 if (spt->guest_page.oos_page)
1796 ops->set_entry(spt->guest_page.oos_page->mem, &we, index,
1797 false, 0, vgpu);
1798
1799 if (can_do_out_of_sync(spt)) {
1800 if (!spt->guest_page.oos_page)
1801 ppgtt_allocate_oos_page(spt);
1802
1803 ret = ppgtt_set_guest_page_oos(spt);
1804 if (ret < 0)
1805 return ret;
1806 }
1807 return 0;
1808 }
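
/*
 * Worked example (illustration only), assuming an 8-byte GTT entry
 * (gtt_entry_size_shift == 3): a 4-byte guest write whose page offset is
 * 0x200 lands at
 *
 *	index = (0x200 & (PAGE_SIZE - 1)) >> 3 = 64
 *
 * Because bytes < gtt_entry_size, the entry is not re-shadowed here; the
 * shadow slot is pointed at the scratch page and bit 64 is set in
 * post_shadow_bitmap so intel_vgpu_flush_post_shadow() replays it later.
 */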
1809
1810 static void invalidate_ppgtt_mm(struct intel_vgpu_mm *mm)
1811 {
1812 struct intel_vgpu *vgpu = mm->vgpu;
1813 struct intel_gvt *gvt = vgpu->gvt;
1814 struct intel_gvt_gtt *gtt = &gvt->gtt;
1815 const struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops;
1816 struct intel_gvt_gtt_entry se;
1817 int index;
1818
1819 if (!mm->ppgtt_mm.shadowed)
1820 return;
1821
1822 for (index = 0; index < ARRAY_SIZE(mm->ppgtt_mm.shadow_pdps); index++) {
1823 ppgtt_get_shadow_root_entry(mm, &se, index);
1824
1825 if (!ops->test_present(&se))
1826 continue;
1827
1828 ppgtt_invalidate_spt_by_shadow_entry(vgpu, &se);
1829 se.val64 = 0;
1830 ppgtt_set_shadow_root_entry(mm, &se, index);
1831
1832 trace_spt_guest_change(vgpu->id, "destroy root pointer",
1833 NULL, se.type, se.val64, index);
1834 }
1835
1836 mm->ppgtt_mm.shadowed = false;
1837 }
1838
1839
1840 static int shadow_ppgtt_mm(struct intel_vgpu_mm *mm)
1841 {
1842 struct intel_vgpu *vgpu = mm->vgpu;
1843 struct intel_gvt *gvt = vgpu->gvt;
1844 struct intel_gvt_gtt *gtt = &gvt->gtt;
1845 const struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops;
1846 struct intel_vgpu_ppgtt_spt *spt;
1847 struct intel_gvt_gtt_entry ge, se;
1848 int index, ret;
1849
1850 if (mm->ppgtt_mm.shadowed)
1851 return 0;
1852
1853 mm->ppgtt_mm.shadowed = true;
1854
1855 for (index = 0; index < ARRAY_SIZE(mm->ppgtt_mm.guest_pdps); index++) {
1856 ppgtt_get_guest_root_entry(mm, &ge, index);
1857
1858 if (!ops->test_present(&ge))
1859 continue;
1860
1861 trace_spt_guest_change(vgpu->id, __func__, NULL,
1862 ge.type, ge.val64, index);
1863
1864 spt = ppgtt_populate_spt_by_guest_entry(vgpu, &ge);
1865 if (IS_ERR(spt)) {
1866 gvt_vgpu_err("fail to populate guest root pointer\n");
1867 ret = PTR_ERR(spt);
1868 goto fail;
1869 }
1870 ppgtt_generate_shadow_entry(&se, spt, &ge);
1871 ppgtt_set_shadow_root_entry(mm, &se, index);
1872
1873 trace_spt_guest_change(vgpu->id, "populate root pointer",
1874 NULL, se.type, se.val64, index);
1875 }
1876
1877 return 0;
1878 fail:
1879 invalidate_ppgtt_mm(mm);
1880 return ret;
1881 }
1882
1883 static struct intel_vgpu_mm *vgpu_alloc_mm(struct intel_vgpu *vgpu)
1884 {
1885 struct intel_vgpu_mm *mm;
1886
1887 mm = kzalloc(sizeof(*mm), GFP_KERNEL);
1888 if (!mm)
1889 return NULL;
1890
1891 mm->vgpu = vgpu;
1892 kref_init(&mm->ref);
1893 atomic_set(&mm->pincount, 0);
1894
1895 return mm;
1896 }
1897
1898 static void vgpu_free_mm(struct intel_vgpu_mm *mm)
1899 {
1900 kfree(mm);
1901 }
1902
1903 /**
1904 * intel_vgpu_create_ppgtt_mm - create a ppgtt mm object for a vGPU
1905 * @vgpu: a vGPU
1906 * @root_entry_type: ppgtt root entry type
1907 * @pdps: guest pdps.
1908 *
1909 * This function is used to create a ppgtt mm object for a vGPU.
1910 *
1911 * Returns:
1912  * The ppgtt mm object on success, an ERR_PTR-encoded error code if failed.
1913 */
1914 struct intel_vgpu_mm *intel_vgpu_create_ppgtt_mm(struct intel_vgpu *vgpu,
1915 enum intel_gvt_gtt_type root_entry_type, u64 pdps[])
1916 {
1917 struct intel_gvt *gvt = vgpu->gvt;
1918 struct intel_vgpu_mm *mm;
1919 int ret;
1920
1921 mm = vgpu_alloc_mm(vgpu);
1922 if (!mm)
1923 return ERR_PTR(-ENOMEM);
1924
1925 mm->type = INTEL_GVT_MM_PPGTT;
1926
1927 GEM_BUG_ON(root_entry_type != GTT_TYPE_PPGTT_ROOT_L3_ENTRY &&
1928 root_entry_type != GTT_TYPE_PPGTT_ROOT_L4_ENTRY);
1929 mm->ppgtt_mm.root_entry_type = root_entry_type;
1930
1931 INIT_LIST_HEAD(&mm->ppgtt_mm.list);
1932 INIT_LIST_HEAD(&mm->ppgtt_mm.lru_list);
1933 INIT_LIST_HEAD(&mm->ppgtt_mm.link);
1934
1935 if (root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY)
1936 mm->ppgtt_mm.guest_pdps[0] = pdps[0];
1937 else
1938 memcpy(mm->ppgtt_mm.guest_pdps, pdps,
1939 sizeof(mm->ppgtt_mm.guest_pdps));
1940
1941 ret = shadow_ppgtt_mm(mm);
1942 if (ret) {
1943 gvt_vgpu_err("failed to shadow ppgtt mm\n");
1944 vgpu_free_mm(mm);
1945 return ERR_PTR(ret);
1946 }
1947
1948 list_add_tail(&mm->ppgtt_mm.list, &vgpu->gtt.ppgtt_mm_list_head);
1949
1950 mutex_lock(&gvt->gtt.ppgtt_mm_lock);
1951 list_add_tail(&mm->ppgtt_mm.lru_list, &gvt->gtt.ppgtt_mm_lru_list_head);
1952 mutex_unlock(&gvt->gtt.ppgtt_mm_lock);
1953
1954 return mm;
1955 }
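
/*
 * Usage sketch (not part of the driver): shadowing a 4-level guest PPGTT,
 * assuming the guest PML4 root has already been read into pdps[0]
 * (guest_pml4 below is a placeholder value).
 *
 *	u64 pdps[4] = { guest_pml4, };
 *	struct intel_vgpu_mm *mm;
 *
 *	mm = intel_vgpu_create_ppgtt_mm(vgpu, GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
 *					pdps);
 *	if (IS_ERR(mm))
 *		return PTR_ERR(mm);
 */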
1956
1957 static struct intel_vgpu_mm *intel_vgpu_create_ggtt_mm(struct intel_vgpu *vgpu)
1958 {
1959 struct intel_vgpu_mm *mm;
1960 unsigned long nr_entries;
1961
1962 mm = vgpu_alloc_mm(vgpu);
1963 if (!mm)
1964 return ERR_PTR(-ENOMEM);
1965
1966 mm->type = INTEL_GVT_MM_GGTT;
1967
1968 nr_entries = gvt_ggtt_gm_sz(vgpu->gvt) >> I915_GTT_PAGE_SHIFT;
1969 mm->ggtt_mm.virtual_ggtt =
1970 vzalloc(array_size(nr_entries,
1971 vgpu->gvt->device_info.gtt_entry_size));
1972 if (!mm->ggtt_mm.virtual_ggtt) {
1973 vgpu_free_mm(mm);
1974 return ERR_PTR(-ENOMEM);
1975 }
1976
1977 mm->ggtt_mm.host_ggtt_aperture = vzalloc((vgpu_aperture_sz(vgpu) >> PAGE_SHIFT) * sizeof(u64));
1978 if (!mm->ggtt_mm.host_ggtt_aperture) {
1979 vfree(mm->ggtt_mm.virtual_ggtt);
1980 vgpu_free_mm(mm);
1981 return ERR_PTR(-ENOMEM);
1982 }
1983
1984 mm->ggtt_mm.host_ggtt_hidden = vzalloc((vgpu_hidden_sz(vgpu) >> PAGE_SHIFT) * sizeof(u64));
1985 if (!mm->ggtt_mm.host_ggtt_hidden) {
1986 vfree(mm->ggtt_mm.host_ggtt_aperture);
1987 vfree(mm->ggtt_mm.virtual_ggtt);
1988 vgpu_free_mm(mm);
1989 return ERR_PTR(-ENOMEM);
1990 }
1991
1992 return mm;
1993 }
1994
1995 /**
1996 * _intel_vgpu_mm_release - destroy a mm object
1997 * @mm_ref: a kref object
1998 *
1999 * This function is used to destroy a mm object for vGPU
2000 *
2001 */
2002 void _intel_vgpu_mm_release(struct kref *mm_ref)
2003 {
2004 struct intel_vgpu_mm *mm = container_of(mm_ref, typeof(*mm), ref);
2005
2006 if (GEM_WARN_ON(atomic_read(&mm->pincount)))
2007 gvt_err("vgpu mm pin count bug detected\n");
2008
2009 if (mm->type == INTEL_GVT_MM_PPGTT) {
2010 list_del(&mm->ppgtt_mm.list);
2011
2012 mutex_lock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
2013 list_del(&mm->ppgtt_mm.lru_list);
2014 mutex_unlock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
2015
2016 invalidate_ppgtt_mm(mm);
2017 } else {
2018 vfree(mm->ggtt_mm.virtual_ggtt);
2019 vfree(mm->ggtt_mm.host_ggtt_aperture);
2020 vfree(mm->ggtt_mm.host_ggtt_hidden);
2021 }
2022
2023 vgpu_free_mm(mm);
2024 }
2025
2026 /**
2027 * intel_vgpu_unpin_mm - decrease the pin count of a vGPU mm object
2028 * @mm: a vGPU mm object
2029 *
2030  * This function is called when the user no longer needs a vGPU mm object.
2031 */
2032 void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm)
2033 {
2034 atomic_dec_if_positive(&mm->pincount);
2035 }
2036
2037 /**
2038 * intel_vgpu_pin_mm - increase the pin count of a vGPU mm object
2039 * @mm: target vgpu mm
2040 *
2041  * This function is called when the user wants to use a vGPU mm object. If this
2042 * mm object hasn't been shadowed yet, the shadow will be populated at this
2043 * time.
2044 *
2045 * Returns:
2046 * Zero on success, negative error code if failed.
2047 */
2048 int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm)
2049 {
2050 int ret;
2051
2052 atomic_inc(&mm->pincount);
2053
2054 if (mm->type == INTEL_GVT_MM_PPGTT) {
2055 ret = shadow_ppgtt_mm(mm);
2056 if (ret)
2057 return ret;
2058
2059 mutex_lock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
2060 list_move_tail(&mm->ppgtt_mm.lru_list,
2061 &mm->vgpu->gvt->gtt.ppgtt_mm_lru_list_head);
2062 mutex_unlock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
2063 }
2064
2065 return 0;
2066 }
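
/*
 * Usage sketch (not part of the driver): a hypothetical workload path pins
 * the mm while the hardware may walk its shadow tables and unpins it when
 * the workload completes; a pinned mm is also skipped by
 * reclaim_one_ppgtt_mm() below.
 *
 *	ret = intel_vgpu_pin_mm(mm);	// re-shadows the mm if needed
 *	if (ret)
 *		return ret;
 *	...				// submit and wait for the workload
 *	intel_vgpu_unpin_mm(mm);
 */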
2067
2068 static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt)
2069 {
2070 struct intel_vgpu_mm *mm;
2071 struct list_head *pos, *n;
2072
2073 mutex_lock(&gvt->gtt.ppgtt_mm_lock);
2074
2075 list_for_each_safe(pos, n, &gvt->gtt.ppgtt_mm_lru_list_head) {
2076 mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.lru_list);
2077
2078 if (atomic_read(&mm->pincount))
2079 continue;
2080
2081 list_del_init(&mm->ppgtt_mm.lru_list);
2082 mutex_unlock(&gvt->gtt.ppgtt_mm_lock);
2083 invalidate_ppgtt_mm(mm);
2084 return 1;
2085 }
2086 mutex_unlock(&gvt->gtt.ppgtt_mm_lock);
2087 return 0;
2088 }
2089
2090 /*
2091 * GMA translation APIs.
2092 */
2093 static inline int ppgtt_get_next_level_entry(struct intel_vgpu_mm *mm,
2094 struct intel_gvt_gtt_entry *e, unsigned long index, bool guest)
2095 {
2096 struct intel_vgpu *vgpu = mm->vgpu;
2097 const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
2098 struct intel_vgpu_ppgtt_spt *s;
2099
2100 s = intel_vgpu_find_spt_by_mfn(vgpu, ops->get_pfn(e));
2101 if (!s)
2102 return -ENXIO;
2103
2104 if (!guest)
2105 ppgtt_get_shadow_entry(s, e, index);
2106 else
2107 ppgtt_get_guest_entry(s, e, index);
2108 return 0;
2109 }
2110
2111 /**
2112 * intel_vgpu_gma_to_gpa - translate a gma to GPA
2113 * @mm: mm object. could be a PPGTT or GGTT mm object
2114 * @gma: graphics memory address in this mm object
2115 *
2116 * This function is used to translate a graphics memory address in specific
2117 * graphics memory space to guest physical address.
2118 *
2119 * Returns:
2120 * Guest physical address on success, INTEL_GVT_INVALID_ADDR if failed.
2121 */
2122 unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma)
2123 {
2124 struct intel_vgpu *vgpu = mm->vgpu;
2125 struct intel_gvt *gvt = vgpu->gvt;
2126 const struct intel_gvt_gtt_pte_ops *pte_ops = gvt->gtt.pte_ops;
2127 const struct intel_gvt_gtt_gma_ops *gma_ops = gvt->gtt.gma_ops;
2128 unsigned long gpa = INTEL_GVT_INVALID_ADDR;
2129 unsigned long gma_index[4];
2130 struct intel_gvt_gtt_entry e;
2131 int i, levels = 0;
2132 int ret;
2133
2134 GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT &&
2135 mm->type != INTEL_GVT_MM_PPGTT);
2136
2137 if (mm->type == INTEL_GVT_MM_GGTT) {
2138 if (!vgpu_gmadr_is_valid(vgpu, gma))
2139 goto err;
2140
2141 ggtt_get_guest_entry(mm, &e,
2142 gma_ops->gma_to_ggtt_pte_index(gma));
2143
2144 gpa = (pte_ops->get_pfn(&e) << I915_GTT_PAGE_SHIFT)
2145 + (gma & ~I915_GTT_PAGE_MASK);
2146
2147 trace_gma_translate(vgpu->id, "ggtt", 0, 0, gma, gpa);
2148 } else {
2149 switch (mm->ppgtt_mm.root_entry_type) {
2150 case GTT_TYPE_PPGTT_ROOT_L4_ENTRY:
2151 ppgtt_get_shadow_root_entry(mm, &e, 0);
2152
2153 gma_index[0] = gma_ops->gma_to_pml4_index(gma);
2154 gma_index[1] = gma_ops->gma_to_l4_pdp_index(gma);
2155 gma_index[2] = gma_ops->gma_to_pde_index(gma);
2156 gma_index[3] = gma_ops->gma_to_pte_index(gma);
2157 levels = 4;
2158 break;
2159 case GTT_TYPE_PPGTT_ROOT_L3_ENTRY:
2160 ppgtt_get_shadow_root_entry(mm, &e,
2161 gma_ops->gma_to_l3_pdp_index(gma));
2162
2163 gma_index[0] = gma_ops->gma_to_pde_index(gma);
2164 gma_index[1] = gma_ops->gma_to_pte_index(gma);
2165 levels = 2;
2166 break;
2167 default:
2168 GEM_BUG_ON(1);
2169 }
2170
2171 /* walk the shadow page table and get gpa from guest entry */
2172 for (i = 0; i < levels; i++) {
2173 ret = ppgtt_get_next_level_entry(mm, &e, gma_index[i],
2174 (i == levels - 1));
2175 if (ret)
2176 goto err;
2177
2178 if (!pte_ops->test_present(&e)) {
2179 gvt_dbg_core("GMA 0x%lx is not present\n", gma);
2180 goto err;
2181 }
2182 }
2183
2184 gpa = (pte_ops->get_pfn(&e) << I915_GTT_PAGE_SHIFT) +
2185 (gma & ~I915_GTT_PAGE_MASK);
2186 trace_gma_translate(vgpu->id, "ppgtt", 0,
2187 mm->ppgtt_mm.root_entry_type, gma, gpa);
2188 }
2189
2190 return gpa;
2191 err:
2192 gvt_vgpu_err("invalid mm type: %d gma %lx\n", mm->type, gma);
2193 return INTEL_GVT_INVALID_ADDR;
2194 }
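
/*
 * Usage sketch (not part of the driver): translating a graphics memory
 * address taken from a guest command buffer into a GPA before copying the
 * data out. intel_gvt_read_gpa() is assumed to be the guest-memory read
 * helper.
 *
 *	unsigned long gpa = intel_vgpu_gma_to_gpa(mm, gma);
 *
 *	if (gpa == INTEL_GVT_INVALID_ADDR)
 *		return -EFAULT;
 *	intel_gvt_read_gpa(vgpu, gpa, buf, len);	// assumed helper
 */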
2195
2196 static int emulate_ggtt_mmio_read(struct intel_vgpu *vgpu,
2197 unsigned int off, void *p_data, unsigned int bytes)
2198 {
2199 struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
2200 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
2201 unsigned long index = off >> info->gtt_entry_size_shift;
2202 unsigned long gma;
2203 struct intel_gvt_gtt_entry e;
2204
2205 if (bytes != 4 && bytes != 8)
2206 return -EINVAL;
2207
2208 gma = index << I915_GTT_PAGE_SHIFT;
2209 if (!intel_gvt_ggtt_validate_range(vgpu,
2210 gma, 1 << I915_GTT_PAGE_SHIFT)) {
2211 gvt_dbg_mm("read invalid ggtt at 0x%lx\n", gma);
2212 memset(p_data, 0, bytes);
2213 return 0;
2214 }
2215
2216 ggtt_get_guest_entry(ggtt_mm, &e, index);
2217 memcpy(p_data, (void *)&e.val64 + (off & (info->gtt_entry_size - 1)),
2218 bytes);
2219 return 0;
2220 }
2221
2222 /**
2223 * intel_vgpu_emulate_ggtt_mmio_read - emulate GTT MMIO register read
2224 * @vgpu: a vGPU
2225 * @off: register offset
2226 * @p_data: data will be returned to guest
2227 * @bytes: data length
2228 *
2229 * This function is used to emulate the GTT MMIO register read
2230 *
2231 * Returns:
2232 * Zero on success, error code if failed.
2233 */
2234 int intel_vgpu_emulate_ggtt_mmio_read(struct intel_vgpu *vgpu, unsigned int off,
2235 void *p_data, unsigned int bytes)
2236 {
2237 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
2238 int ret;
2239
2240 if (bytes != 4 && bytes != 8)
2241 return -EINVAL;
2242
2243 off -= info->gtt_start_offset;
2244 ret = emulate_ggtt_mmio_read(vgpu, off, p_data, bytes);
2245 return ret;
2246 }
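
/*
 * Worked example (illustration only), assuming an 8-byte GGTT entry: a
 * 4-byte MMIO read at gtt_start_offset + 0x1004 is handled as
 *
 *	off   = 0x1004                 (after subtracting gtt_start_offset)
 *	index = 0x1004 >> 3 = 512      (the GGTT entry being read)
 *	gma   = 512 << 12 = 0x200000   (guest graphics address it maps)
 *
 * and, since off & 7 == 4, the upper 4 bytes of that guest entry are
 * returned to the guest.
 */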
2247
2248 static void ggtt_invalidate_pte(struct intel_vgpu *vgpu,
2249 struct intel_gvt_gtt_entry *entry)
2250 {
2251 const struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
2252 unsigned long pfn;
2253
2254 pfn = pte_ops->get_pfn(entry);
2255 if (pfn != vgpu->gvt->gtt.scratch_mfn)
2256 intel_gvt_dma_unmap_guest_page(vgpu, pfn << PAGE_SHIFT);
2257 }
2258
2259 static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
2260 void *p_data, unsigned int bytes)
2261 {
2262 struct intel_gvt *gvt = vgpu->gvt;
2263 const struct intel_gvt_device_info *info = &gvt->device_info;
2264 struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
2265 const struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
2266 unsigned long g_gtt_index = off >> info->gtt_entry_size_shift;
2267 unsigned long gma, gfn;
2268 struct intel_gvt_gtt_entry e = {.val64 = 0, .type = GTT_TYPE_GGTT_PTE};
2269 struct intel_gvt_gtt_entry m = {.val64 = 0, .type = GTT_TYPE_GGTT_PTE};
2270 dma_addr_t dma_addr;
2271 int ret;
2272 struct intel_gvt_partial_pte *partial_pte, *pos, *n;
2273 bool partial_update = false;
2274
2275 if (bytes != 4 && bytes != 8)
2276 return -EINVAL;
2277
2278 gma = g_gtt_index << I915_GTT_PAGE_SHIFT;
2279
2280 /* the VM may configure the whole GM space when ballooning is used */
2281 if (!vgpu_gmadr_is_valid(vgpu, gma))
2282 return 0;
2283
2284 e.type = GTT_TYPE_GGTT_PTE;
2285 memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data,
2286 bytes);
2287
2288 	/* If the ggtt entry size is 8 bytes and the guest splits it into two
2289 	 * 4-byte writes, save the first 4 bytes in a list and update the
2290 	 * virtual PTE. Only update the shadow PTE when the second 4 bytes arrive.
2291 */
2292 if (bytes < info->gtt_entry_size) {
2293 bool found = false;
2294
2295 list_for_each_entry_safe(pos, n,
2296 &ggtt_mm->ggtt_mm.partial_pte_list, list) {
2297 if (g_gtt_index == pos->offset >>
2298 info->gtt_entry_size_shift) {
2299 if (off != pos->offset) {
2300 					/* the second partial part */
2301 int last_off = pos->offset &
2302 (info->gtt_entry_size - 1);
2303
2304 memcpy((void *)&e.val64 + last_off,
2305 (void *)&pos->data + last_off,
2306 bytes);
2307
2308 list_del(&pos->list);
2309 kfree(pos);
2310 found = true;
2311 break;
2312 }
2313
2314 /* update of the first partial part */
2315 pos->data = e.val64;
2316 ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
2317 return 0;
2318 }
2319 }
2320
2321 if (!found) {
2322 /* the first partial part */
2323 partial_pte = kzalloc(sizeof(*partial_pte), GFP_KERNEL);
2324 if (!partial_pte)
2325 return -ENOMEM;
2326 partial_pte->offset = off;
2327 partial_pte->data = e.val64;
2328 list_add_tail(&partial_pte->list,
2329 &ggtt_mm->ggtt_mm.partial_pte_list);
2330 partial_update = true;
2331 }
2332 }
2333
2334 if (!partial_update && (ops->test_present(&e))) {
2335 gfn = ops->get_pfn(&e);
2336 m.val64 = e.val64;
2337 m.type = e.type;
2338
2339 /* one PTE update may be issued in multiple writes and the
2340 * first write may not construct a valid gfn
2341 */
2342 if (!intel_gvt_is_valid_gfn(vgpu, gfn)) {
2343 ops->set_pfn(&m, gvt->gtt.scratch_mfn);
2344 goto out;
2345 }
2346
2347 ret = intel_gvt_dma_map_guest_page(vgpu, gfn, PAGE_SIZE,
2348 &dma_addr);
2349 if (ret) {
2350 gvt_vgpu_err("fail to populate guest ggtt entry\n");
2351 			/* The guest driver may read/write the entry while we are
2352 			 * in the middle of a partial update; in that case the p2m
2353 			 * mapping fails, so point the shadow entry at a scratch page.
2354 */
2355 ops->set_pfn(&m, gvt->gtt.scratch_mfn);
2356 } else
2357 ops->set_pfn(&m, dma_addr >> PAGE_SHIFT);
2358 } else {
2359 ops->set_pfn(&m, gvt->gtt.scratch_mfn);
2360 ops->clear_present(&m);
2361 }
2362
2363 out:
2364 ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
2365
2366 ggtt_get_host_entry(ggtt_mm, &e, g_gtt_index);
2367 ggtt_invalidate_pte(vgpu, &e);
2368
2369 ggtt_set_host_entry(ggtt_mm, &m, g_gtt_index);
2370 ggtt_invalidate(gvt->gt);
2371 return 0;
2372 }
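
/*
 * Worked example (illustration only): a guest updating one 8-byte GGTT PTE
 * with two 4-byte MMIO writes to the same entry.
 *
 *	write(off = 0x100, bytes = 4)	-> half PTE saved on partial_pte_list,
 *					   shadow entry points at the scratch
 *					   page
 *	write(off = 0x104, bytes = 4)	-> the halves are merged, the guest
 *					   page is dma-mapped and the shadow
 *					   (host) entry is written
 */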
2373
2374 /*
2375 * intel_vgpu_emulate_ggtt_mmio_write - emulate GTT MMIO register write
2376 * @vgpu: a vGPU
2377 * @off: register offset
2378 * @p_data: data from guest write
2379 * @bytes: data length
2380 *
2381 * This function is used to emulate the GTT MMIO register write
2382 *
2383 * Returns:
2384 * Zero on success, error code if failed.
2385 */
2386 int intel_vgpu_emulate_ggtt_mmio_write(struct intel_vgpu *vgpu,
2387 unsigned int off, void *p_data, unsigned int bytes)
2388 {
2389 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
2390 int ret;
2391 struct intel_vgpu_submission *s = &vgpu->submission;
2392 struct intel_engine_cs *engine;
2393 int i;
2394
2395 if (bytes != 4 && bytes != 8)
2396 return -EINVAL;
2397
2398 off -= info->gtt_start_offset;
2399 ret = emulate_ggtt_mmio_write(vgpu, off, p_data, bytes);
2400
2401 	/* If the GGTT entry of the last submitted context is written,
2402 	 * that context has probably been unpinned.
2403 	 * Mark the last shadowed ctx as invalid.
2404 */
2405 for_each_engine(engine, vgpu->gvt->gt, i) {
2406 if (!s->last_ctx[i].valid)
2407 continue;
2408
2409 if (s->last_ctx[i].lrca == (off >> info->gtt_entry_size_shift))
2410 s->last_ctx[i].valid = false;
2411 }
2412 return ret;
2413 }
2414
2415 static int alloc_scratch_pages(struct intel_vgpu *vgpu,
2416 enum intel_gvt_gtt_type type)
2417 {
2418 struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
2419 struct intel_vgpu_gtt *gtt = &vgpu->gtt;
2420 const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
2421 int page_entry_num = I915_GTT_PAGE_SIZE >>
2422 vgpu->gvt->device_info.gtt_entry_size_shift;
2423 void *scratch_pt;
2424 int i;
2425 struct device *dev = vgpu->gvt->gt->i915->drm.dev;
2426 dma_addr_t daddr;
2427
2428 if (drm_WARN_ON(&i915->drm,
2429 type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX))
2430 return -EINVAL;
2431
2432 scratch_pt = (void *)get_zeroed_page(GFP_KERNEL);
2433 if (!scratch_pt) {
2434 gvt_vgpu_err("fail to allocate scratch page\n");
2435 return -ENOMEM;
2436 }
2437
2438 daddr = dma_map_page(dev, virt_to_page(scratch_pt), 0, 4096, DMA_BIDIRECTIONAL);
2439 if (dma_mapping_error(dev, daddr)) {
2440 gvt_vgpu_err("fail to dmamap scratch_pt\n");
2441 __free_page(virt_to_page(scratch_pt));
2442 return -ENOMEM;
2443 }
2444 gtt->scratch_pt[type].page_mfn =
2445 (unsigned long)(daddr >> I915_GTT_PAGE_SHIFT);
2446 gtt->scratch_pt[type].page = virt_to_page(scratch_pt);
2447 gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n",
2448 vgpu->id, type, gtt->scratch_pt[type].page_mfn);
2449
2450 	/* Build the tree by filling the scratch pt completely with entries
2451 	 * that point to the next-level scratch pt or scratch page.
2452 	 * scratch_pt[type] is the scratch pt/scratch page used by a page
2453 	 * table of the given 'type'.
2454 	 * e.g. scratch_pt[GTT_TYPE_PPGTT_PDE_PT] is used by a
2455 	 * GTT_TYPE_PPGTT_PDE_PT level pt, which means this scratch_pt itself
2456 	 * is of GTT_TYPE_PPGTT_PTE_PT and is filled with the scratch page mfn.
2457 */
2458 if (type > GTT_TYPE_PPGTT_PTE_PT) {
2459 struct intel_gvt_gtt_entry se;
2460
2461 memset(&se, 0, sizeof(struct intel_gvt_gtt_entry));
2462 se.type = get_entry_type(type - 1);
2463 ops->set_pfn(&se, gtt->scratch_pt[type - 1].page_mfn);
2464
2465 		/* The entry parameters (present/writable/cache type) are
2466 		 * set to match i915's scratch page tree.
2467 */
2468 se.val64 |= GEN8_PAGE_PRESENT | GEN8_PAGE_RW;
2469 if (type == GTT_TYPE_PPGTT_PDE_PT)
2470 se.val64 |= PPAT_CACHED;
2471
2472 for (i = 0; i < page_entry_num; i++)
2473 ops->set_entry(scratch_pt, &se, i, false, 0, vgpu);
2474 }
2475
2476 return 0;
2477 }
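
/*
 * Layout sketch (illustration only) of the scratch page tree built by
 * create_scratch_page_tree() below: each level's scratch table is filled
 * with entries pointing at the scratch table one level down, so a guest
 * hole at any level resolves to the zero-filled PTE-level scratch page.
 *
 *	scratch_pt[PML4_PT] -> scratch_pt[PDP_PT]
 *	  -> scratch_pt[PDE_PT] -> scratch_pt[PTE_PT] (zero-filled entries)
 */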
2478
2479 static int release_scratch_page_tree(struct intel_vgpu *vgpu)
2480 {
2481 int i;
2482 struct device *dev = vgpu->gvt->gt->i915->drm.dev;
2483 dma_addr_t daddr;
2484
2485 for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
2486 if (vgpu->gtt.scratch_pt[i].page != NULL) {
2487 daddr = (dma_addr_t)(vgpu->gtt.scratch_pt[i].page_mfn <<
2488 I915_GTT_PAGE_SHIFT);
2489 dma_unmap_page(dev, daddr, 4096, DMA_BIDIRECTIONAL);
2490 __free_page(vgpu->gtt.scratch_pt[i].page);
2491 vgpu->gtt.scratch_pt[i].page = NULL;
2492 vgpu->gtt.scratch_pt[i].page_mfn = 0;
2493 }
2494 }
2495
2496 return 0;
2497 }
2498
2499 static int create_scratch_page_tree(struct intel_vgpu *vgpu)
2500 {
2501 int i, ret;
2502
2503 for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
2504 ret = alloc_scratch_pages(vgpu, i);
2505 if (ret)
2506 goto err;
2507 }
2508
2509 return 0;
2510
2511 err:
2512 release_scratch_page_tree(vgpu);
2513 return ret;
2514 }
2515
2516 /**
2517  * intel_vgpu_init_gtt - initialize per-vGPU graphics memory virtualization
2518 * @vgpu: a vGPU
2519 *
2520 * This function is used to initialize per-vGPU graphics memory virtualization
2521 * components.
2522 *
2523 * Returns:
2524 * Zero on success, error code if failed.
2525 */
2526 int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
2527 {
2528 struct intel_vgpu_gtt *gtt = &vgpu->gtt;
2529
2530 INIT_RADIX_TREE(>t->spt_tree, GFP_KERNEL);
2531
2532 INIT_LIST_HEAD(>t->ppgtt_mm_list_head);
2533 INIT_LIST_HEAD(>t->oos_page_list_head);
2534 INIT_LIST_HEAD(>t->post_shadow_list_head);
2535
2536 gtt->ggtt_mm = intel_vgpu_create_ggtt_mm(vgpu);
2537 if (IS_ERR(gtt->ggtt_mm)) {
2538 gvt_vgpu_err("fail to create mm for ggtt.\n");
2539 return PTR_ERR(gtt->ggtt_mm);
2540 }
2541
2542 intel_vgpu_reset_ggtt(vgpu, false);
2543
2544 INIT_LIST_HEAD(>t->ggtt_mm->ggtt_mm.partial_pte_list);
2545
2546 return create_scratch_page_tree(vgpu);
2547 }
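
/*
 * Usage note (illustration only): intel_vgpu_init_gtt() is expected to be
 * paired with intel_vgpu_clean_gtt() below, e.g. from a hypothetical vGPU
 * create/destroy path.
 *
 *	if (intel_vgpu_init_gtt(vgpu))	// GGTT mm + scratch page tree
 *		goto err;
 *	...
 *	intel_vgpu_clean_gtt(vgpu);	// tear down PPGTT/GGTT mms and scratch
 */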
2548
2549 void intel_vgpu_destroy_all_ppgtt_mm(struct intel_vgpu *vgpu)
2550 {
2551 struct list_head *pos, *n;
2552 struct intel_vgpu_mm *mm;
2553
2554 list_for_each_safe(pos, n, &vgpu->gtt.ppgtt_mm_list_head) {
2555 mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list);
2556 intel_vgpu_destroy_mm(mm);
2557 }
2558
2559 if (GEM_WARN_ON(!list_empty(&vgpu->gtt.ppgtt_mm_list_head)))
2560 gvt_err("vgpu ppgtt mm is not fully destroyed\n");
2561
2562 if (GEM_WARN_ON(!radix_tree_empty(&vgpu->gtt.spt_tree))) {
2563 		gvt_err("why are there still unfreed spt objects?\n");
2564 ppgtt_free_all_spt(vgpu);
2565 }
2566 }
2567
2568 static void intel_vgpu_destroy_ggtt_mm(struct intel_vgpu *vgpu)
2569 {
2570 struct intel_gvt_partial_pte *pos, *next;
2571
2572 list_for_each_entry_safe(pos, next,
2573 &vgpu->gtt.ggtt_mm->ggtt_mm.partial_pte_list,
2574 list) {
2575 gvt_dbg_mm("partial PTE update on hold 0x%lx : 0x%llx\n",
2576 pos->offset, pos->data);
2577 kfree(pos);
2578 }
2579 intel_vgpu_destroy_mm(vgpu->gtt.ggtt_mm);
2580 vgpu->gtt.ggtt_mm = NULL;
2581 }
2582
2583 /**
2584  * intel_vgpu_clean_gtt - clean up per-vGPU graphics memory virtualization
2585 * @vgpu: a vGPU
2586 *
2587 * This function is used to clean up per-vGPU graphics memory virtualization
2588 * components.
2592 */
2593 void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu)
2594 {
2595 intel_vgpu_destroy_all_ppgtt_mm(vgpu);
2596 intel_vgpu_destroy_ggtt_mm(vgpu);
2597 release_scratch_page_tree(vgpu);
2598 }
2599
2600 static void clean_spt_oos(struct intel_gvt *gvt)
2601 {
2602 struct intel_gvt_gtt *gtt = &gvt->gtt;
2603 struct list_head *pos, *n;
2604 struct intel_vgpu_oos_page *oos_page;
2605
2606 WARN(!list_empty(>t->oos_page_use_list_head),
2607 "someone is still using oos page\n");
2608
2609 list_for_each_safe(pos, n, >t->oos_page_free_list_head) {
2610 oos_page = container_of(pos, struct intel_vgpu_oos_page, list);
2611 list_del(&oos_page->list);
2612 free_page((unsigned long)oos_page->mem);
2613 kfree(oos_page);
2614 }
2615 }
2616
2617 static int setup_spt_oos(struct intel_gvt *gvt)
2618 {
2619 struct intel_gvt_gtt *gtt = &gvt->gtt;
2620 struct intel_vgpu_oos_page *oos_page;
2621 int i;
2622 int ret;
2623
2624 INIT_LIST_HEAD(>t->oos_page_free_list_head);
2625 INIT_LIST_HEAD(>t->oos_page_use_list_head);
2626
2627 for (i = 0; i < preallocated_oos_pages; i++) {
2628 oos_page = kzalloc(sizeof(*oos_page), GFP_KERNEL);
2629 if (!oos_page) {
2630 ret = -ENOMEM;
2631 goto fail;
2632 }
2633 oos_page->mem = (void *)__get_free_pages(GFP_KERNEL, 0);
2634 if (!oos_page->mem) {
2635 ret = -ENOMEM;
2636 kfree(oos_page);
2637 goto fail;
2638 }
2639
2640 INIT_LIST_HEAD(&oos_page->list);
2641 INIT_LIST_HEAD(&oos_page->vm_list);
2642 oos_page->id = i;
2643 list_add_tail(&oos_page->list, >t->oos_page_free_list_head);
2644 }
2645
2646 gvt_dbg_mm("%d oos pages preallocated\n", i);
2647
2648 return 0;
2649 fail:
2650 clean_spt_oos(gvt);
2651 return ret;
2652 }
2653
2654 /**
2655 * intel_vgpu_find_ppgtt_mm - find a PPGTT mm object
2656 * @vgpu: a vGPU
2657 * @pdps: pdp root array
2658 *
2659 * This function is used to find a PPGTT mm object from mm object pool
2660 *
2661 * Returns:
2662 * pointer to mm object on success, NULL if failed.
2663 */
2664 struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu,
2665 u64 pdps[])
2666 {
2667 struct intel_vgpu_mm *mm;
2668 struct list_head *pos;
2669
2670 list_for_each(pos, &vgpu->gtt.ppgtt_mm_list_head) {
2671 mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list);
2672
2673 switch (mm->ppgtt_mm.root_entry_type) {
2674 case GTT_TYPE_PPGTT_ROOT_L4_ENTRY:
2675 if (pdps[0] == mm->ppgtt_mm.guest_pdps[0])
2676 return mm;
2677 break;
2678 case GTT_TYPE_PPGTT_ROOT_L3_ENTRY:
2679 if (!memcmp(pdps, mm->ppgtt_mm.guest_pdps,
2680 sizeof(mm->ppgtt_mm.guest_pdps)))
2681 return mm;
2682 break;
2683 default:
2684 GEM_BUG_ON(1);
2685 }
2686 }
2687 return NULL;
2688 }
2689
2690 /**
2691 * intel_vgpu_get_ppgtt_mm - get or create a PPGTT mm object.
2692 * @vgpu: a vGPU
2693 * @root_entry_type: ppgtt root entry type
2694 * @pdps: guest pdps
2695 *
2696 * This function is used to find or create a PPGTT mm object from a guest.
2697 *
2698 * Returns:
2699  * The ppgtt mm object on success, an ERR_PTR-encoded error code if failed.
2700 */
2701 struct intel_vgpu_mm *intel_vgpu_get_ppgtt_mm(struct intel_vgpu *vgpu,
2702 enum intel_gvt_gtt_type root_entry_type, u64 pdps[])
2703 {
2704 struct intel_vgpu_mm *mm;
2705
2706 mm = intel_vgpu_find_ppgtt_mm(vgpu, pdps);
2707 if (mm) {
2708 intel_vgpu_mm_get(mm);
2709 } else {
2710 mm = intel_vgpu_create_ppgtt_mm(vgpu, root_entry_type, pdps);
2711 if (IS_ERR(mm))
2712 gvt_vgpu_err("fail to create mm\n");
2713 }
2714 return mm;
2715 }
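
/*
 * Usage sketch (not part of the driver): the PV notification handler is
 * assumed to look roughly like this when the guest announces a new 4-level
 * page table, with intel_vgpu_put_ppgtt_mm() below as the destroy-side
 * counterpart.
 *
 *	mm = intel_vgpu_get_ppgtt_mm(vgpu, GTT_TYPE_PPGTT_ROOT_L4_ENTRY, pdps);
 *	if (IS_ERR(mm))
 *		return PTR_ERR(mm);
 *	...
 *	intel_vgpu_put_ppgtt_mm(vgpu, pdps);	// drops the reference
 */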
2716
2717 /**
2718 * intel_vgpu_put_ppgtt_mm - find and put a PPGTT mm object.
2719 * @vgpu: a vGPU
2720 * @pdps: guest pdps
2721 *
2722 * This function is used to find a PPGTT mm object from a guest and destroy it.
2723 *
2724 * Returns:
2725 * Zero on success, negative error code if failed.
2726 */
2727 int intel_vgpu_put_ppgtt_mm(struct intel_vgpu *vgpu, u64 pdps[])
2728 {
2729 struct intel_vgpu_mm *mm;
2730
2731 mm = intel_vgpu_find_ppgtt_mm(vgpu, pdps);
2732 if (!mm) {
2733 gvt_vgpu_err("fail to find ppgtt instance.\n");
2734 return -EINVAL;
2735 }
2736 intel_vgpu_mm_put(mm);
2737 return 0;
2738 }
2739
2740 /**
2741 * intel_gvt_init_gtt - initialize mm components of a GVT device
2742 * @gvt: GVT device
2743 *
2744 * This function is called at the initialization stage, to initialize
2745 * the mm components of a GVT device.
2746 *
2747 * Returns:
2748 * zero on success, negative error code if failed.
2749 */
2750 int intel_gvt_init_gtt(struct intel_gvt *gvt)
2751 {
2752 int ret;
2753 void *page;
2754 struct device *dev = gvt->gt->i915->drm.dev;
2755 dma_addr_t daddr;
2756
2757 gvt_dbg_core("init gtt\n");
2758
2759 gvt->gtt.pte_ops = &gen8_gtt_pte_ops;
2760 gvt->gtt.gma_ops = &gen8_gtt_gma_ops;
2761
2762 page = (void *)get_zeroed_page(GFP_KERNEL);
2763 if (!page) {
2764 gvt_err("fail to allocate scratch ggtt page\n");
2765 return -ENOMEM;
2766 }
2767
2768 daddr = dma_map_page(dev, virt_to_page(page), 0,
2769 4096, DMA_BIDIRECTIONAL);
2770 if (dma_mapping_error(dev, daddr)) {
2771 gvt_err("fail to dmamap scratch ggtt page\n");
2772 __free_page(virt_to_page(page));
2773 return -ENOMEM;
2774 }
2775
2776 gvt->gtt.scratch_page = virt_to_page(page);
2777 gvt->gtt.scratch_mfn = (unsigned long)(daddr >> I915_GTT_PAGE_SHIFT);
2778
2779 if (enable_out_of_sync) {
2780 ret = setup_spt_oos(gvt);
2781 if (ret) {
2782 gvt_err("fail to initialize SPT oos\n");
2783 dma_unmap_page(dev, daddr, 4096, DMA_BIDIRECTIONAL);
2784 __free_page(gvt->gtt.scratch_page);
2785 return ret;
2786 }
2787 }
2788 INIT_LIST_HEAD(&gvt->gtt.ppgtt_mm_lru_list_head);
2789 mutex_init(&gvt->gtt.ppgtt_mm_lock);
2790 return 0;
2791 }
2792
2793 /**
2794 * intel_gvt_clean_gtt - clean up mm components of a GVT device
2795 * @gvt: GVT device
2796 *
2797  * This function is called at the driver unloading stage to clean up
2798  * the mm components of a GVT device.
2799 *
2800 */
2801 void intel_gvt_clean_gtt(struct intel_gvt *gvt)
2802 {
2803 struct device *dev = gvt->gt->i915->drm.dev;
2804 dma_addr_t daddr = (dma_addr_t)(gvt->gtt.scratch_mfn <<
2805 I915_GTT_PAGE_SHIFT);
2806
2807 dma_unmap_page(dev, daddr, 4096, DMA_BIDIRECTIONAL);
2808
2809 __free_page(gvt->gtt.scratch_page);
2810
2811 if (enable_out_of_sync)
2812 clean_spt_oos(gvt);
2813 }
2814
2815 /**
2816 * intel_vgpu_invalidate_ppgtt - invalidate PPGTT instances
2817 * @vgpu: a vGPU
2818 *
2819  * This function is called to invalidate all PPGTT instances of a vGPU.
2820 *
2821 */
2822 void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu)
2823 {
2824 struct list_head *pos, *n;
2825 struct intel_vgpu_mm *mm;
2826
2827 list_for_each_safe(pos, n, &vgpu->gtt.ppgtt_mm_list_head) {
2828 mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list);
2829 if (mm->type == INTEL_GVT_MM_PPGTT) {
2830 mutex_lock(&vgpu->gvt->gtt.ppgtt_mm_lock);
2831 list_del_init(&mm->ppgtt_mm.lru_list);
2832 mutex_unlock(&vgpu->gvt->gtt.ppgtt_mm_lock);
2833 if (mm->ppgtt_mm.shadowed)
2834 invalidate_ppgtt_mm(mm);
2835 }
2836 }
2837 }
2838
2839 /**
2840 * intel_vgpu_reset_ggtt - reset the GGTT entry
2841 * @vgpu: a vGPU
2842 * @invalidate_old: invalidate old entries
2843 *
2844 * This function is called at the vGPU create stage
2845 * to reset all the GGTT entries.
2846 *
2847 */
2848 void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old)
2849 {
2850 struct intel_gvt *gvt = vgpu->gvt;
2851 const struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
2852 struct intel_gvt_gtt_entry entry = {.type = GTT_TYPE_GGTT_PTE};
2853 struct intel_gvt_gtt_entry old_entry;
2854 u32 index;
2855 u32 num_entries;
2856
2857 pte_ops->set_pfn(&entry, gvt->gtt.scratch_mfn);
2858 pte_ops->set_present(&entry);
2859
2860 index = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT;
2861 num_entries = vgpu_aperture_sz(vgpu) >> PAGE_SHIFT;
2862 while (num_entries--) {
2863 if (invalidate_old) {
2864 ggtt_get_host_entry(vgpu->gtt.ggtt_mm, &old_entry, index);
2865 ggtt_invalidate_pte(vgpu, &old_entry);
2866 }
2867 ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++);
2868 }
2869
2870 index = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT;
2871 num_entries = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT;
2872 while (num_entries--) {
2873 if (invalidate_old) {
2874 ggtt_get_host_entry(vgpu->gtt.ggtt_mm, &old_entry, index);
2875 ggtt_invalidate_pte(vgpu, &old_entry);
2876 }
2877 ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++);
2878 }
2879
2880 ggtt_invalidate(gvt->gt);
2881 }
2882
2883 /**
2884 * intel_vgpu_reset_gtt - reset the all GTT related status
2885 * @vgpu: a vGPU
2886 *
2887  * This function is called from the vfio core to reset all
2888  * GTT-related state, including the GGTT, PPGTT and scratch pages.
2889 *
2890 */
2891 void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu)
2892 {
2893 /* Shadow pages are only created when there is no page
2894 * table tracking data, so remove page tracking data after
2895 * removing the shadow pages.
2896 */
2897 intel_vgpu_destroy_all_ppgtt_mm(vgpu);
2898 intel_vgpu_reset_ggtt(vgpu, true);
2899 }
2900
2901 /**
2902 * intel_gvt_restore_ggtt - restore all vGPU's ggtt entries
2903 * @gvt: intel gvt device
2904 *
2905 * This function is called at driver resume stage to restore
2906 * GGTT entries of every vGPU.
2907 *
2908 */
2909 void intel_gvt_restore_ggtt(struct intel_gvt *gvt)
2910 {
2911 struct intel_vgpu *vgpu;
2912 struct intel_vgpu_mm *mm;
2913 int id;
2914 gen8_pte_t pte;
2915 u32 idx, num_low, num_hi, offset;
2916
2917 /* Restore dirty host ggtt for all vGPUs */
2918 idr_for_each_entry(&(gvt)->vgpu_idr, vgpu, id) {
2919 mm = vgpu->gtt.ggtt_mm;
2920
2921 num_low = vgpu_aperture_sz(vgpu) >> PAGE_SHIFT;
2922 offset = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT;
2923 for (idx = 0; idx < num_low; idx++) {
2924 pte = mm->ggtt_mm.host_ggtt_aperture[idx];
2925 if (pte & GEN8_PAGE_PRESENT)
2926 write_pte64(vgpu->gvt->gt->ggtt, offset + idx, pte);
2927 }
2928
2929 num_hi = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT;
2930 offset = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT;
2931 for (idx = 0; idx < num_hi; idx++) {
2932 pte = mm->ggtt_mm.host_ggtt_hidden[idx];
2933 if (pte & GEN8_PAGE_PRESENT)
2934 write_pte64(vgpu->gvt->gt->ggtt, offset + idx, pte);
2935 }
2936 }
2937 }
2938