// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <linux/log2.h>

#include "gem/i915_gem_internal.h"

#include "gen6_ppgtt.h"
#include "i915_scatterlist.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
#include "intel_gt_regs.h"
#include "intel_engine_regs.h"
#include "intel_gt.h"

/* Write the page directory entry (PDE) at index @pde to point at the page table @pt */
static void gen6_write_pde(const struct gen6_ppgtt *ppgtt,
			   const unsigned int pde,
			   const struct i915_page_table *pt)
{
	dma_addr_t addr = pt ? px_dma(pt) : px_dma(ppgtt->base.vm.scratch[1]);

	/* Caller needs to make sure the write completes if necessary */
	iowrite32(GEN6_PDE_ADDR_ENCODE(addr) | GEN6_PDE_VALID,
		  ppgtt->pd_addr + pde);
}

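/*
 * Enable PPGTT on gen7 (Ivybridge/Haswell): program the ECO bits and the
 * GAM ECOCHK cacheability controls used for PPGTT walks.
 */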
void gen7_ppgtt_enable(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_uncore *uncore = gt->uncore;
	u32 ecochk;

	intel_uncore_rmw(uncore, GAC_ECO_BITS, 0, ECOBITS_PPGTT_CACHE64B);

	ecochk = intel_uncore_read(uncore, GAM_ECOCHK);
	if (IS_HASWELL(i915)) {
		ecochk |= ECOCHK_PPGTT_WB_HSW;
	} else {
		ecochk |= ECOCHK_PPGTT_LLC_IVB;
		ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
	}
	intel_uncore_write(uncore, GAM_ECOCHK, ecochk);
}

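/*
 * Enable PPGTT on gen6 (Sandybridge): configure the ECO/GAB/ECOCHK bits
 * and, unless PPGTT was disabled (e.g. for VT-d), turn on PPGTT
 * addressing in GFX_MODE.
 */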
void gen6_ppgtt_enable(struct intel_gt *gt)
{
	struct intel_uncore *uncore = gt->uncore;

	intel_uncore_rmw(uncore,
			 GAC_ECO_BITS,
			 0,
			 ECOBITS_SNB_BIT | ECOBITS_PPGTT_CACHE64B);

	intel_uncore_rmw(uncore,
			 GAB_CTL,
			 0,
			 GAB_CTL_CONT_AFTER_PAGEFAULT);

	intel_uncore_rmw(uncore,
			 GAM_ECOCHK,
			 0,
			 ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B);

	if (HAS_PPGTT(uncore->i915)) /* may be disabled for VT-d */
		intel_uncore_write(uncore,
				   GFX_MODE,
				   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
}

/* PPGTT support for Sandybridge/Gen6 and later */
static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
				   u64 start, u64 length)
{
	struct gen6_ppgtt * const ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
	const unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
	const gen6_pte_t scratch_pte = vm->scratch[0]->encode;
	unsigned int pde = first_entry / GEN6_PTES;
	unsigned int pte = first_entry % GEN6_PTES;
	unsigned int num_entries = length / I915_GTT_PAGE_SIZE;

	while (num_entries) {
		struct i915_page_table * const pt =
			i915_pt_entry(ppgtt->base.pd, pde++);
		const unsigned int count = min(num_entries, GEN6_PTES - pte);
		gen6_pte_t *vaddr;

		num_entries -= count;

		GEM_BUG_ON(count > atomic_read(&pt->used));
		if (!atomic_sub_return(count, &pt->used))
			ppgtt->scan_for_unused_pt = true;

		/*
		 * Note that the hw doesn't support removing PDE on the fly
		 * (they are cached inside the context with no means to
		 * invalidate the cache), so we can only reset the PTE
		 * entries back to scratch.
		 */

		vaddr = px_vaddr(pt);
		memset32(vaddr + pte, scratch_pte, count);

		pte = 0;
	}
}

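/*
 * Write PTEs for the pages backing @vma_res, walking the scatterlist one
 * 4K page at a time and moving on to the next page table as each one
 * fills up.
 */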
static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
				      struct i915_vma_resource *vma_res,
				      enum i915_cache_level cache_level,
				      u32 flags)
{
	struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	struct i915_page_directory * const pd = ppgtt->pd;
	unsigned int first_entry = vma_res->start / I915_GTT_PAGE_SIZE;
	unsigned int act_pt = first_entry / GEN6_PTES;
	unsigned int act_pte = first_entry % GEN6_PTES;
	const u32 pte_encode = vm->pte_encode(0, cache_level, flags);
	struct sgt_dma iter = sgt_dma(vma_res);
	gen6_pte_t *vaddr;

	GEM_BUG_ON(!pd->entry[act_pt]);

	vaddr = px_vaddr(i915_pt_entry(pd, act_pt));
	do {
		GEM_BUG_ON(sg_dma_len(iter.sg) < I915_GTT_PAGE_SIZE);
		vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma);

		iter.dma += I915_GTT_PAGE_SIZE;
		if (iter.dma == iter.max) {
			iter.sg = __sg_next(iter.sg);
			if (!iter.sg || sg_dma_len(iter.sg) == 0)
				break;

			iter.dma = sg_dma_address(iter.sg);
			iter.max = iter.dma + sg_dma_len(iter.sg);
		}

		if (++act_pte == GEN6_PTES) {
			vaddr = px_vaddr(i915_pt_entry(pd, ++act_pt));
			act_pte = 0;
		}
	} while (1);

	vma_res->page_sizes_gtt = I915_GTT_PAGE_SIZE;
}

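/*
 * Rewrite the PDEs covering [start, end) into the GGTT-resident page
 * directory, then force the update out to the hardware by reading back
 * the last PDE written and invalidating the GGTT.
 */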
static void gen6_flush_pd(struct gen6_ppgtt *ppgtt, u64 start, u64 end)
{
	struct i915_page_directory * const pd = ppgtt->base.pd;
	struct i915_page_table *pt;
	unsigned int pde;

	start = round_down(start, SZ_64K);
	end = round_up(end, SZ_64K) - start;

	mutex_lock(&ppgtt->flush);

	gen6_for_each_pde(pt, pd, start, end, pde)
		gen6_write_pde(ppgtt, pde, pt);

	mb();
	ioread32(ppgtt->pd_addr + pde - 1);
	gen6_ggtt_invalidate(ppgtt->base.vm.gt->ggtt);
	mb();

	mutex_unlock(&ppgtt->flush);
}

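/*
 * Allocate page tables for [start, start + length), taking preallocated
 * tables from @stash. Newly installed PDEs are flushed to the hardware
 * if the page directory is already bound into the GGTT.
 */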
static void gen6_alloc_va_range(struct i915_address_space *vm,
				struct i915_vm_pt_stash *stash,
				u64 start, u64 length)
{
	struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
	struct i915_page_directory * const pd = ppgtt->base.pd;
	struct i915_page_table *pt;
	bool flush = false;
	u64 from = start;
	unsigned int pde;

	spin_lock(&pd->lock);
	gen6_for_each_pde(pt, pd, start, length, pde) {
		const unsigned int count = gen6_pte_count(start, length);

		if (!pt) {
			spin_unlock(&pd->lock);

			pt = stash->pt[0];
			__i915_gem_object_pin_pages(pt->base);

			fill32_px(pt, vm->scratch[0]->encode);

			spin_lock(&pd->lock);
			if (!pd->entry[pde]) {
				stash->pt[0] = pt->stash;
				atomic_set(&pt->used, 0);
				pd->entry[pde] = pt;
			} else {
				pt = pd->entry[pde];
			}

			flush = true;
		}

		atomic_add(count, &pt->used);
	}
	spin_unlock(&pd->lock);

	if (flush && i915_vma_is_bound(ppgtt->vma, I915_VMA_GLOBAL_BIND)) {
		intel_wakeref_t wakeref;

		with_intel_runtime_pm(&vm->i915->runtime_pm, wakeref)
			gen6_flush_pd(ppgtt, from, start);
	}
}

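/*
 * Set up the scratch page (scratch[0]) and a scratch page table
 * (scratch[1]) filled with scratch PTEs, used to back unallocated and
 * cleared ranges.
 */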
static int gen6_ppgtt_init_scratch(struct gen6_ppgtt *ppgtt)
{
	struct i915_address_space * const vm = &ppgtt->base.vm;
	int ret;

	ret = setup_scratch_page(vm);
	if (ret)
		return ret;

	vm->scratch[0]->encode =
		vm->pte_encode(px_dma(vm->scratch[0]),
			       I915_CACHE_NONE, PTE_READ_ONLY);

	vm->scratch[1] = vm->alloc_pt_dma(vm, I915_GTT_PAGE_SIZE_4K);
	if (IS_ERR(vm->scratch[1])) {
		ret = PTR_ERR(vm->scratch[1]);
		goto err_scratch0;
	}

	ret = map_pt_dma(vm, vm->scratch[1]);
	if (ret)
		goto err_scratch1;

	fill32_px(vm->scratch[1], vm->scratch[0]->encode);

	return 0;

err_scratch1:
	i915_gem_object_put(vm->scratch[1]);
err_scratch0:
	i915_gem_object_put(vm->scratch[0]);
	vm->scratch[0] = NULL;
	return ret;
}

static void gen6_ppgtt_free_pd(struct gen6_ppgtt *ppgtt)
{
	struct i915_page_directory * const pd = ppgtt->base.pd;
	struct i915_page_table *pt;
	u32 pde;

	gen6_for_all_pdes(pt, pd, pde)
		if (pt)
			free_pt(&ppgtt->base.vm, pt);
}

static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
{
	struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));

	gen6_ppgtt_free_pd(ppgtt);
	free_scratch(vm);

	if (ppgtt->base.pd)
		free_pd(&ppgtt->base.vm, ppgtt->base.pd);

	mutex_destroy(&ppgtt->flush);
}

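/*
 * Bind callback for the page-directory vma: now that we know where in
 * the GGTT the page directory lives, record the offset the hardware
 * will use (pp_dir) and the iomem address of the PDEs for the CPU
 * (pd_addr), then write out all PDEs.
 */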
static void pd_vma_bind(struct i915_address_space *vm,
			struct i915_vm_pt_stash *stash,
			struct i915_vma_resource *vma_res,
			enum i915_cache_level cache_level,
			u32 unused)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	struct gen6_ppgtt *ppgtt = vma_res->private;
	u32 ggtt_offset = vma_res->start / I915_GTT_PAGE_SIZE;

	ppgtt->pp_dir = ggtt_offset * sizeof(gen6_pte_t) << 10;
	ppgtt->pd_addr = (gen6_pte_t __iomem *)ggtt->gsm + ggtt_offset;

	gen6_flush_pd(ppgtt, 0, ppgtt->base.vm.total);
}

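/*
 * Unbind callback for the page-directory vma: an opportunistic point at
 * which to reap any page tables whose PTEs have all been returned to
 * scratch since the last scan.
 */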
static void pd_vma_unbind(struct i915_address_space *vm,
			  struct i915_vma_resource *vma_res)
{
	struct gen6_ppgtt *ppgtt = vma_res->private;
	struct i915_page_directory * const pd = ppgtt->base.pd;
	struct i915_page_table *pt;
	unsigned int pde;

	if (!ppgtt->scan_for_unused_pt)
		return;

	/* Free all no longer used page tables */
	gen6_for_all_pdes(pt, ppgtt->base.pd, pde) {
		if (!pt || atomic_read(&pt->used))
			continue;

		free_pt(&ppgtt->base.vm, pt);
		pd->entry[pde] = NULL;
	}

	ppgtt->scan_for_unused_pt = false;
}

static const struct i915_vma_ops pd_vma_ops = {
	.bind_vma = pd_vma_bind,
	.unbind_vma = pd_vma_unbind,
};

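/*
 * Pin the page-directory vma into the GGTT, tracked by ppgtt->pin_count
 * so that only the first pin pays for the GGTT binding.
 */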
int gen6_ppgtt_pin(struct i915_ppgtt *base, struct i915_gem_ww_ctx *ww)
{
	struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base);
	int err;

	GEM_BUG_ON(!kref_read(&ppgtt->base.vm.ref));

	/*
	 * Workaround the limited maximum vma->pin_count and the aliasing_ppgtt
	 * which will be pinned into every active context.
	 * (When vma->pin_count becomes atomic, I expect we will naturally
	 * need a larger, unpacked, type and kill this redundancy.)
	 */
	if (atomic_add_unless(&ppgtt->pin_count, 1, 0))
		return 0;

	/* grab the ppgtt resv to pin the object */
	err = i915_vm_lock_objects(&ppgtt->base.vm, ww);
	if (err)
		return err;

	/*
	 * PPGTT PDEs reside in the GGTT and consist of 512 entries. The
	 * allocator works in address space sizes, so it's multiplied by page
	 * size. We allocate at the top of the GTT to avoid fragmentation.
	 */
	if (!atomic_read(&ppgtt->pin_count)) {
		err = i915_ggtt_pin(ppgtt->vma, ww, GEN6_PD_ALIGN, PIN_HIGH);

		GEM_BUG_ON(ppgtt->vma->fence);
		clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(ppgtt->vma));
	}
	if (!err)
		atomic_inc(&ppgtt->pin_count);

	return err;
}

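/*
 * The PDEs of the top-level page directory live inside the GGTT, so its
 * vma is backed by a dummy object with no pages of its own.
 */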
static int pd_dummy_obj_get_pages(struct drm_i915_gem_object *obj)
{
	obj->mm.pages = ZERO_SIZE_PTR;
	return 0;
}

static void pd_dummy_obj_put_pages(struct drm_i915_gem_object *obj,
				   struct sg_table *pages)
{
}

static const struct drm_i915_gem_object_ops pd_dummy_obj_ops = {
	.name = "pd_dummy_obj",
	.get_pages = pd_dummy_obj_get_pages,
	.put_pages = pd_dummy_obj_put_pages,
};

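/*
 * Allocate the top-level page directory together with the GGTT vma
 * through which its PDEs are written (see pd_vma_bind()).
 */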
static struct i915_page_directory *
gen6_alloc_top_pd(struct gen6_ppgtt *ppgtt)
{
	struct i915_ggtt * const ggtt = ppgtt->base.vm.gt->ggtt;
	struct i915_page_directory *pd;
	int err;

	pd = __alloc_pd(I915_PDES);
	if (unlikely(!pd))
		return ERR_PTR(-ENOMEM);

	pd->pt.base = __i915_gem_object_create_internal(ppgtt->base.vm.gt->i915,
							&pd_dummy_obj_ops,
							I915_PDES * SZ_4K);
	if (IS_ERR(pd->pt.base)) {
		err = PTR_ERR(pd->pt.base);
		pd->pt.base = NULL;
		goto err_pd;
	}

	pd->pt.base->base.resv = i915_vm_resv_get(&ppgtt->base.vm);
	pd->pt.base->shares_resv_from = &ppgtt->base.vm;

	ppgtt->vma = i915_vma_instance(pd->pt.base, &ggtt->vm, NULL);
	if (IS_ERR(ppgtt->vma)) {
		err = PTR_ERR(ppgtt->vma);
		ppgtt->vma = NULL;
		goto err_pd;
	}

	/* The dummy object we create is special, override its ops. */
	ppgtt->vma->ops = &pd_vma_ops;
	ppgtt->vma->private = ppgtt;
	return pd;

err_pd:
	free_pd(&ppgtt->base.vm, pd);
	return ERR_PTR(err);
}

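/* Release a pin; drop the GGTT binding when the last pin goes away. */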
void gen6_ppgtt_unpin(struct i915_ppgtt *base)
{
	struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base);

	GEM_BUG_ON(!atomic_read(&ppgtt->pin_count));
	if (atomic_dec_and_test(&ppgtt->pin_count))
		i915_vma_unpin(ppgtt->vma);
}

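/*
 * Create a gen6-style ppgtt: a single page directory of 512 PDEs, each
 * pointing at a page table of 1024 PTEs, giving a 2 GiB per-context
 * address space.
 */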
struct i915_ppgtt *gen6_ppgtt_create(struct intel_gt *gt)
{
	struct i915_ggtt * const ggtt = gt->ggtt;
	struct gen6_ppgtt *ppgtt;
	int err;

	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
	if (!ppgtt)
		return ERR_PTR(-ENOMEM);

	mutex_init(&ppgtt->flush);

	ppgtt_init(&ppgtt->base, gt, 0);
	ppgtt->base.vm.pd_shift = ilog2(SZ_4K * SZ_4K / sizeof(gen6_pte_t));
	ppgtt->base.vm.top = 1;

	ppgtt->base.vm.bind_async_flags = I915_VMA_LOCAL_BIND;
	ppgtt->base.vm.allocate_va_range = gen6_alloc_va_range;
	ppgtt->base.vm.clear_range = gen6_ppgtt_clear_range;
	ppgtt->base.vm.insert_entries = gen6_ppgtt_insert_entries;
	ppgtt->base.vm.cleanup = gen6_ppgtt_cleanup;

	ppgtt->base.vm.alloc_pt_dma = alloc_pt_dma;
	ppgtt->base.vm.alloc_scratch_dma = alloc_pt_dma;
	ppgtt->base.vm.pte_encode = ggtt->vm.pte_encode;

	err = gen6_ppgtt_init_scratch(ppgtt);
	if (err)
		goto err_put;

	ppgtt->base.pd = gen6_alloc_top_pd(ppgtt);
	if (IS_ERR(ppgtt->base.pd)) {
		err = PTR_ERR(ppgtt->base.pd);
		goto err_put;
	}

	return &ppgtt->base;

err_put:
	i915_vm_put(&ppgtt->base.vm);
	return ERR_PTR(err);
}