// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "gem/i915_gem_domain.h"
#include "gt/gen8_ppgtt.h"

#include "i915_drv.h"
#include "intel_display_types.h"
#include "intel_dpt.h"
#include "intel_fb.h"

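/*
 * A display page table (DPT) maps a single framebuffer's backing pages for
 * the display engine. The PTEs live in a GEM object of their own, which is
 * itself bound into the GGTT and mapped for CPU access, so updating them
 * only takes plain memory writes through the iomap below.
 */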
struct i915_dpt {
	struct i915_address_space vm;

	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	void __iomem *iomem;
};

#define i915_is_dpt(vm) ((vm)->is_dpt)

static inline struct i915_dpt *
i915_vm_to_dpt(struct i915_address_space *vm)
{
	BUILD_BUG_ON(offsetof(struct i915_dpt, vm));
	GEM_BUG_ON(!i915_is_dpt(vm));
	return container_of(vm, struct i915_dpt, vm);
}

#define dpt_total_entries(dpt) ((dpt)->vm.total >> PAGE_SHIFT)

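/* PTEs are 64 bit wide on gen8+, so a single qword write suffices. */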
static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
{
	writeq(pte, addr);
}

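/*
 * Insert a single page into the DPT at the given offset, writing its
 * encoded PTE through the CPU iomap of the DPT object.
 */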
static void dpt_insert_page(struct i915_address_space *vm,
			    dma_addr_t addr,
			    u64 offset,
			    enum i915_cache_level level,
			    u32 flags)
{
	struct i915_dpt *dpt = i915_vm_to_dpt(vm);
	gen8_pte_t __iomem *base = dpt->iomem;

	gen8_set_pte(base + offset / I915_GTT_PAGE_SIZE,
		     vm->pte_encode(addr, level, flags));
}

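/*
 * Write a PTE for each page backing @vma_res, starting at the VMA's
 * offset within the DPT.
 */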
static void dpt_insert_entries(struct i915_address_space *vm,
			       struct i915_vma_resource *vma_res,
			       enum i915_cache_level level,
			       u32 flags)
{
	struct i915_dpt *dpt = i915_vm_to_dpt(vm);
	gen8_pte_t __iomem *base = dpt->iomem;
	const gen8_pte_t pte_encode = vm->pte_encode(0, level, flags);
	struct sgt_iter sgt_iter;
	dma_addr_t addr;
	int i;

	/*
	 * Note that we ignore PTE_READ_ONLY here. The caller must be careful
	 * not to allow the user to override access to a read only page.
	 */

	i = vma_res->start / I915_GTT_PAGE_SIZE;
	for_each_sgt_daddr(addr, sgt_iter, vma_res->bi.pages)
		gen8_set_pte(&base[i++], pte_encode | addr);
}

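/* Intentionally a no-op: stale DPT entries are simply left in place. */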
static void dpt_clear_range(struct i915_address_space *vm,
			    u64 start, u64 length)
{
}

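/*
 * Bind a VMA into the DPT: work out the read-only/local-memory PTE flags
 * and write the PTEs, unless the VMA is already bound.
 */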
static void dpt_bind_vma(struct i915_address_space *vm,
			 struct i915_vm_pt_stash *stash,
			 struct i915_vma_resource *vma_res,
			 enum i915_cache_level cache_level,
			 u32 flags)
{
	u32 pte_flags;

	if (vma_res->bound_flags)
		return;

	/* Applicable to VLV (gen8+ do not support RO in the GGTT) */
	pte_flags = 0;
	if (vm->has_read_only && vma_res->bi.readonly)
		pte_flags |= PTE_READ_ONLY;
	if (vma_res->bi.lmem)
		pte_flags |= PTE_LM;

	vm->insert_entries(vm, vma_res, cache_level, pte_flags);

	vma_res->page_sizes_gtt = I915_GTT_PAGE_SIZE;

	/*
	 * Without aliasing PPGTT there's no difference between
	 * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
	 * upgrade to both bound if we bind either to avoid double-binding.
	 */
	vma_res->bound_flags = I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
}

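/* Unbinding clears the VMA's range, which for the DPT is a no-op. */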
static void dpt_unbind_vma(struct i915_address_space *vm,
			   struct i915_vma_resource *vma_res)
{
	vm->clear_range(vm, vma_res->start, vma_res->vma_size);
}

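/* Drop the reference on the backing object when the address space dies. */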
static void dpt_cleanup(struct i915_address_space *vm)
{
	struct i915_dpt *dpt = i915_vm_to_dpt(vm);

	i915_gem_object_put(dpt->obj);
}

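/**
 * intel_dpt_pin - pin the DPT page table object into the GGTT
 * @vm: the DPT address space
 *
 * Pin the object backing @vm into the GGTT (into the mappable aperture on
 * platforms without local memory) and map it for CPU access, so that the
 * DPT PTEs can be updated with plain memory writes. The mapping and the
 * VMA reference taken here are released by intel_dpt_unpin().
 *
 * Returns the pinned VMA on success, or an ERR_PTR() on failure.
 */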
struct i915_vma *intel_dpt_pin(struct i915_address_space *vm)
{
	struct drm_i915_private *i915 = vm->i915;
	struct i915_dpt *dpt = i915_vm_to_dpt(vm);
	intel_wakeref_t wakeref;
	struct i915_vma *vma;
	void __iomem *iomem;
	struct i915_gem_ww_ctx ww;
	int err;

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
	atomic_inc(&i915->gpu_error.pending_fb_pin);

	for_i915_gem_ww(&ww, err, true) {
		err = i915_gem_object_lock(dpt->obj, &ww);
		if (err)
			continue;

		vma = i915_gem_object_ggtt_pin_ww(dpt->obj, &ww, NULL, 0, 4096,
						  HAS_LMEM(i915) ? 0 : PIN_MAPPABLE);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			continue;
		}

		iomem = i915_vma_pin_iomap(vma);
		i915_vma_unpin(vma);

		if (IS_ERR(iomem)) {
			err = PTR_ERR(iomem);
			continue;
		}

		dpt->vma = vma;
		dpt->iomem = iomem;

		i915_vma_get(vma);
	}

	atomic_dec(&i915->gpu_error.pending_fb_pin);
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);

	return err ? ERR_PTR(err) : vma;
}

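/**
 * intel_dpt_unpin - undo intel_dpt_pin()
 * @vm: the DPT address space
 *
 * Release the CPU mapping and the VMA reference taken by intel_dpt_pin().
 */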
void intel_dpt_unpin(struct i915_address_space *vm)
{
	struct i915_dpt *dpt = i915_vm_to_dpt(vm);

	i915_vma_unpin_iomap(dpt->vma);
	i915_vma_put(dpt->vma);
}

/**
 * intel_dpt_resume - restore the memory mapping for all DPT FBs during system resume
 * @i915: device instance
 *
 * Restore the memory mapping during system resume for all framebuffers which
 * are mapped to HW via a GGTT->DPT page table. The content of these page
 * tables is not stored in the hibernation image during S4 and S3RST->S4
 * transitions, so here we reprogram the PTE entries in those tables.
 *
 * This function must be called after the mappings in GGTT have been restored
 * by calling i915_ggtt_resume().
 */
void intel_dpt_resume(struct drm_i915_private *i915)
{
	struct drm_framebuffer *drm_fb;

	if (!HAS_DISPLAY(i915))
		return;

	mutex_lock(&i915->drm.mode_config.fb_lock);
	drm_for_each_fb(drm_fb, &i915->drm) {
		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);

		if (fb->dpt_vm)
			i915_ggtt_resume_vm(fb->dpt_vm);
	}
	mutex_unlock(&i915->drm.mode_config.fb_lock);
}

/**
 * intel_dpt_suspend - suspend the memory mapping for all DPT FBs during system suspend
 * @i915: device instance
 *
 * Suspend the memory mapping during system suspend for all framebuffers which
 * are mapped to HW via a GGTT->DPT page table.
 *
 * This function must be called before the mappings in GGTT are suspended
 * by calling i915_ggtt_suspend().
 */
void intel_dpt_suspend(struct drm_i915_private *i915)
{
	struct drm_framebuffer *drm_fb;

	if (!HAS_DISPLAY(i915))
		return;

	mutex_lock(&i915->drm.mode_config.fb_lock);

	drm_for_each_fb(drm_fb, &i915->drm) {
		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);

		if (fb->dpt_vm)
			i915_ggtt_suspend_vm(fb->dpt_vm);
	}

	mutex_unlock(&i915->drm.mode_config.fb_lock);
}

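/**
 * intel_dpt_create - create the DPT address space for a framebuffer
 * @fb: the framebuffer the DPT will map
 *
 * Allocate a GEM object large enough to hold one PTE per page of the
 * framebuffer's (possibly remapped) view, placed in local memory on
 * platforms that have it and in stolen memory otherwise, and initialize
 * an address space around it with the DPT insert/clear/bind hooks.
 *
 * Returns the new address space, or an ERR_PTR() on failure.
 */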
struct i915_address_space *
intel_dpt_create(struct intel_framebuffer *fb)
{
	struct drm_gem_object *obj = &intel_fb_obj(&fb->base)->base;
	struct drm_i915_private *i915 = to_i915(obj->dev);
	struct drm_i915_gem_object *dpt_obj;
	struct i915_address_space *vm;
	struct i915_dpt *dpt;
	size_t size;
	int ret;

	if (intel_fb_needs_pot_stride_remap(fb))
		size = intel_remapped_info_size(&fb->remapped_view.gtt.remapped);
	else
		size = DIV_ROUND_UP_ULL(obj->size, I915_GTT_PAGE_SIZE);

	size = round_up(size * sizeof(gen8_pte_t), I915_GTT_PAGE_SIZE);

	if (HAS_LMEM(i915))
		dpt_obj = i915_gem_object_create_lmem(i915, size, I915_BO_ALLOC_CONTIGUOUS);
	else
		dpt_obj = i915_gem_object_create_stolen(i915, size);
	if (IS_ERR(dpt_obj))
		return ERR_CAST(dpt_obj);

	ret = i915_gem_object_lock_interruptible(dpt_obj, NULL);
	if (!ret) {
		ret = i915_gem_object_set_cache_level(dpt_obj, I915_CACHE_NONE);
		i915_gem_object_unlock(dpt_obj);
	}
	if (ret) {
		i915_gem_object_put(dpt_obj);
		return ERR_PTR(ret);
	}

	dpt = kzalloc(sizeof(*dpt), GFP_KERNEL);
	if (!dpt) {
		i915_gem_object_put(dpt_obj);
		return ERR_PTR(-ENOMEM);
	}

	vm = &dpt->vm;

	vm->gt = to_gt(i915);
	vm->i915 = i915;
	vm->dma = i915->drm.dev;
	vm->total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE;
	vm->is_dpt = true;

	i915_address_space_init(vm, VM_CLASS_DPT);

	vm->insert_page = dpt_insert_page;
	vm->clear_range = dpt_clear_range;
	vm->insert_entries = dpt_insert_entries;
	vm->cleanup = dpt_cleanup;

	vm->vma_ops.bind_vma    = dpt_bind_vma;
	vm->vma_ops.unbind_vma  = dpt_unbind_vma;

	vm->pte_encode = gen8_ggtt_pte_encode;

	dpt->obj = dpt_obj;

	return &dpt->vm;
}

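/**
 * intel_dpt_destroy - drop the reference on a DPT address space
 * @vm: the DPT address space
 *
 * The address space, and with it the backing object (via dpt_cleanup()),
 * is freed once the last reference is dropped.
 */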
void intel_dpt_destroy(struct i915_address_space *vm)
{
	struct i915_dpt *dpt = i915_vm_to_dpt(vm);

	i915_vm_put(&dpt->vm);
}