/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2020 Intel Corporation
 *
 * Please try to maintain the following order within this file unless it makes
 * sense to do otherwise. From top to bottom:
 * 1. typedefs
 * 2. #defines and macros
 * 3. structure definitions
 * 4. function prototypes
 *
 * Within each section, please try to order by generation in ascending order,
 * from top to bottom (i.e. gen6 on the top, gen8 on the bottom).
 */

#ifndef __INTEL_GTT_H__
#define __INTEL_GTT_H__

#include <linux/io-mapping.h>
#include <linux/kref.h>
#include <linux/mm.h>
#include <linux/pagevec.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>

#include <drm/drm_mm.h>

#include "gt/intel_reset.h"
#include "i915_selftest.h"
#include "i915_vma_resource.h"
#include "i915_vma_types.h"
#include "i915_params.h"
#include "intel_memory_region.h"

#define I915_GFP_ALLOW_FAIL (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)

#if IS_ENABLED(CONFIG_DRM_I915_TRACE_GTT)
#define DBG(...) trace_printk(__VA_ARGS__)
#else
#define DBG(...)
#endif

#define NALLOC 3 /* 1 normal, 1 for concurrent threads, 1 for preallocation */

#define I915_GTT_PAGE_SIZE_4K BIT_ULL(12)
#define I915_GTT_PAGE_SIZE_64K BIT_ULL(16)
#define I915_GTT_PAGE_SIZE_2M BIT_ULL(21)

#define I915_GTT_PAGE_SIZE I915_GTT_PAGE_SIZE_4K
#define I915_GTT_MAX_PAGE_SIZE I915_GTT_PAGE_SIZE_2M

#define I915_GTT_PAGE_MASK (-I915_GTT_PAGE_SIZE)

#define I915_GTT_MIN_ALIGNMENT I915_GTT_PAGE_SIZE

#define I915_FENCE_REG_NONE (-1)
#define I915_MAX_NUM_FENCES 32
/* 32 fences + sign bit for FENCE_REG_NONE */
#define I915_MAX_NUM_FENCE_BITS 6
typedef u32 gen6_pte_t;
typedef u64 gen8_pte_t;

#define ggtt_total_entries(ggtt) ((ggtt)->vm.total >> PAGE_SHIFT)

#define I915_PTES(pte_len) ((unsigned int)(PAGE_SIZE / (pte_len)))
#define I915_PTE_MASK(pte_len) (I915_PTES(pte_len) - 1)
#define I915_PDES 512
#define I915_PDE_MASK (I915_PDES - 1)

/* gen6-hsw has PTE bits 11:4 for physical address bits 39:32 */
#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
#define GEN6_PDE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
#define GEN6_PTE_CACHE_LLC (2 << 1)
#define GEN6_PTE_UNCACHED (1 << 1)
#define GEN6_PTE_VALID REG_BIT(0)
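/*
 * Worked example (illustrative, not a real mapping): a 40-bit address
 * such as 0x12_3456_7000 has bits 39:32 equal to 0x12; the encode folds
 * them into PTE bits 11:4 (0x12 << 4 == 0x120), so the 32-bit PTE
 * address field becomes 0x34567120:
 *
 *	gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(0x1234567000ULL) |
 *			 GEN6_PTE_CACHE_LLC | GEN6_PTE_VALID;
 */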

#define GEN6_PTES I915_PTES(sizeof(gen6_pte_t))
#define GEN6_PD_SIZE (I915_PDES * PAGE_SIZE)
#define GEN6_PD_ALIGN (PAGE_SIZE * 16)
#define GEN6_PDE_SHIFT 22
#define GEN6_PDE_VALID REG_BIT(0)
#define NUM_PTE(pde_shift) (1 << (pde_shift - PAGE_SHIFT))

#define GEN7_PTE_CACHE_L3_LLC (3 << 1)

#define BYT_PTE_SNOOPED_BY_CPU_CACHES REG_BIT(2)
#define BYT_PTE_WRITEABLE REG_BIT(1)

#define GEN12_PPGTT_PTE_LM BIT_ULL(11)

#define GEN12_GGTT_PTE_LM BIT_ULL(1)

#define GEN12_PDE_64K BIT(6)

/*
 * Cacheability Control is a 4-bit value. The low three bits are stored in bits
 * 3:1 of the PTE, while the fourth bit is stored in bit 11 of the PTE.
 */
#define HSW_CACHEABILITY_CONTROL(bits) ((((bits) & 0x7) << 1) | \
					(((bits) & 0x8) << (11 - 3)))
#define HSW_WB_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x2)
#define HSW_WB_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x3)
#define HSW_WB_ELLC_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x8)
#define HSW_WB_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0xb)
#define HSW_WT_ELLC_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x7)
#define HSW_WT_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x6)
#define HSW_PTE_UNCACHED (0)
#define HSW_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0x7f0))
#define HSW_PTE_ADDR_ENCODE(addr) HSW_GTT_ADDR_ENCODE(addr)
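/*
 * Worked example: HSW_WB_ELLC_LLC_AGE0 is HSW_CACHEABILITY_CONTROL(0xb),
 * i.e. 0b1011. The low three bits (0b011) land in PTE bits 3:1 giving
 * 0x6, and the fourth bit is moved from bit 3 to bit 11 giving 0x800,
 * so the macro expands to 0x806.
 */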

/*
 * GEN8 32b style address is defined as a 3 level page table:
 * 31:30 | 29:21 | 20:12 | 11:0
 * PDPE  | PDE   | PTE   | offset
 * The difference compared to a normal x86 3-level page table is that the
 * PDPEs are programmed via register.
 *
 * GEN8 48b style address is defined as a 4 level page table:
 * 47:39 | 38:30 | 29:21 | 20:12 | 11:0
 * PML4E | PDPE  | PDE   | PTE   | offset
 */
#define GEN8_3LVL_PDPES 4
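/*
 * Illustrative decomposition of a 48b style address; the masks follow
 * directly from the table above (each level has 512 entries):
 *
 *	u32 pml4e  = (addr >> 39) & 0x1ff;
 *	u32 pdpe   = (addr >> 30) & 0x1ff;
 *	u32 pde    = (addr >> 21) & 0x1ff;
 *	u32 pte    = (addr >> 12) & 0x1ff;
 *	u32 offset = addr & 0xfff;
 */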

#define PPAT_UNCACHED (_PAGE_PWT | _PAGE_PCD)
#define PPAT_CACHED_PDE 0 /* WB LLC */
#define PPAT_CACHED _PAGE_PAT /* WB LLCeLLC */
#define PPAT_DISPLAY_ELLC _PAGE_PCD /* WT eLLC */

#define CHV_PPAT_SNOOP REG_BIT(6)
#define GEN8_PPAT_AGE(x) ((x) << 4)
#define GEN8_PPAT_LLCeLLC (3 << 2)
#define GEN8_PPAT_LLCELLC (2 << 2)
#define GEN8_PPAT_LLC (1 << 2)
#define GEN8_PPAT_WB (3 << 0)
#define GEN8_PPAT_WT (2 << 0)
#define GEN8_PPAT_WC (1 << 0)
#define GEN8_PPAT_UC (0 << 0)
#define GEN8_PPAT_ELLC_OVERRIDE (0 << 2)
#define GEN8_PPAT(i, x) ((u64)(x) << ((i) * 8))

#define GEN8_PAGE_PRESENT BIT_ULL(0)
#define GEN8_PAGE_RW BIT_ULL(1)

#define GEN8_PDE_IPS_64K BIT(11)
#define GEN8_PDE_PS_2M BIT(7)

enum i915_cache_level;

struct drm_i915_gem_object;
struct i915_fence_reg;
struct i915_vma;
struct intel_gt;

#define for_each_sgt_daddr(__dp, __iter, __sgt) \
	__for_each_sgt_daddr(__dp, __iter, __sgt, I915_GTT_PAGE_SIZE)
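/*
 * Typical use (a sketch; insert_pte() is a stand-in for a real PTE
 * writer): walk every I915_GTT_PAGE_SIZE-sized dma address in an sg
 * table using a struct sgt_iter cursor from i915_scatterlist.h:
 *
 *	struct sgt_iter iter;
 *	dma_addr_t addr;
 *
 *	for_each_sgt_daddr(addr, iter, vma_res->bi.pages)
 *		insert_pte(vm, addr);
 */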

struct i915_page_table {
	struct drm_i915_gem_object *base;
	union {
		atomic_t used;
		struct i915_page_table *stash;
	};
	bool is_compact;
};

struct i915_page_directory {
	struct i915_page_table pt;
	spinlock_t lock;
	void **entry;
};

#define __px_choose_expr(x, type, expr, other) \
	__builtin_choose_expr( \
	__builtin_types_compatible_p(typeof(x), type) || \
	__builtin_types_compatible_p(typeof(x), const type), \
	({ type __x = (type)(x); expr; }), \
	other)

#define px_base(px) \
	__px_choose_expr(px, struct drm_i915_gem_object *, __x, \
	__px_choose_expr(px, struct i915_page_table *, __x->base, \
	__px_choose_expr(px, struct i915_page_directory *, __x->pt.base, \
	(void)0)))

struct page *__px_page(struct drm_i915_gem_object *p);
dma_addr_t __px_dma(struct drm_i915_gem_object *p);
#define px_dma(px) (__px_dma(px_base(px)))

void *__px_vaddr(struct drm_i915_gem_object *p);
#define px_vaddr(px) (__px_vaddr(px_base(px)))
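/*
 * px_base() is polymorphic over the paging structures: a GEM object
 * resolves to itself, a page table to ->base, and a page directory to
 * ->pt.base, all chosen at compile time. E.g. (illustrative), given a
 * struct i915_page_directory *pd, px_dma(pd) expands to
 * __px_dma(pd->pt.base) and px_vaddr(pd) to __px_vaddr(pd->pt.base).
 */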

#define px_pt(px) \
	__px_choose_expr(px, struct i915_page_table *, __x, \
	__px_choose_expr(px, struct i915_page_directory *, &__x->pt, \
	(void)0))
#define px_used(px) (&px_pt(px)->used)

struct i915_vm_pt_stash {
	/* preallocated chains of page tables/directories */
	struct i915_page_table *pt[2];
	/*
	 * Optionally override the alignment/size of the physical page that
	 * contains each PT. If not set, this defaults back to the usual
	 * I915_GTT_PAGE_SIZE_4K. This does not influence the other paging
	 * structures. MUST be a power-of-two. ONLY applicable on discrete
	 * platforms.
	 */
	int pt_sz;
};

struct i915_vma_ops {
	/* Map an object into an address space with the given cache flags. */
	void (*bind_vma)(struct i915_address_space *vm,
			 struct i915_vm_pt_stash *stash,
			 struct i915_vma_resource *vma_res,
			 enum i915_cache_level cache_level,
			 u32 flags);
	/*
	 * Unmap an object from an address space. This usually consists of
	 * setting the valid PTE entries to a reserved scratch page.
	 */
	void (*unbind_vma)(struct i915_address_space *vm,
			   struct i915_vma_resource *vma_res);
};

struct i915_address_space {
	struct kref ref;
	struct work_struct release_work;

	struct drm_mm mm;
	struct intel_gt *gt;
	struct drm_i915_private *i915;
	struct device *dma;
	u64 total; /* size addr space maps (e.g. 2GB for ggtt) */
	u64 reserved; /* size addr space reserved */
	u64 min_alignment[INTEL_MEMORY_STOLEN_LOCAL + 1];

	unsigned int bind_async_flags;

	struct mutex mutex; /* protects vma and our lists */

	struct kref resv_ref; /* kref to keep the reservation lock alive. */
	struct dma_resv _resv; /* reservation lock for all pd objects, and buffer pool */
#define VM_CLASS_GGTT 0
#define VM_CLASS_PPGTT 1
#define VM_CLASS_DPT 2

	struct drm_i915_gem_object *scratch[4];
	/**
	 * List of VMAs currently bound.
	 */
	struct list_head bound_list;

	/**
	 * List of VMAs not yet bound or evicted.
	 */
	struct list_head unbound_list;

	/* Global GTT */
	bool is_ggtt:1;

	/* Display page table */
	bool is_dpt:1;

	/* Some systems support read-only mappings for GGTT and/or PPGTT */
	bool has_read_only:1;

	/* Skip pte rewrite on unbind for suspend. Protected by @mutex */
	bool skip_pte_rewrite:1;

	u8 top;
	u8 pd_shift;
	u8 scratch_order;

	/* Flags used when creating page-table objects for this vm */
	unsigned long lmem_pt_obj_flags;

	/* Interval tree for pending unbind vma resources */
	struct rb_root_cached pending_unbind;

	struct drm_i915_gem_object *
		(*alloc_pt_dma)(struct i915_address_space *vm, int sz);
	struct drm_i915_gem_object *
		(*alloc_scratch_dma)(struct i915_address_space *vm, int sz);

	u64 (*pte_encode)(dma_addr_t addr,
			  enum i915_cache_level level,
			  u32 flags); /* Create a valid PTE */
#define PTE_READ_ONLY BIT(0)
#define PTE_LM BIT(1)

	void (*allocate_va_range)(struct i915_address_space *vm,
				  struct i915_vm_pt_stash *stash,
				  u64 start, u64 length);
	void (*clear_range)(struct i915_address_space *vm,
			    u64 start, u64 length);
	void (*insert_page)(struct i915_address_space *vm,
			    dma_addr_t addr,
			    u64 offset,
			    enum i915_cache_level cache_level,
			    u32 flags);
	void (*insert_entries)(struct i915_address_space *vm,
			       struct i915_vma_resource *vma_res,
			       enum i915_cache_level cache_level,
			       u32 flags);
	void (*raw_insert_page)(struct i915_address_space *vm,
				dma_addr_t addr,
				u64 offset,
				enum i915_cache_level cache_level,
				u32 flags);
	void (*raw_insert_entries)(struct i915_address_space *vm,
				   struct i915_vma_resource *vma_res,
				   enum i915_cache_level cache_level,
				   u32 flags);
	void (*cleanup)(struct i915_address_space *vm);

	void (*foreach)(struct i915_address_space *vm,
			u64 start, u64 length,
			void (*fn)(struct i915_address_space *vm,
				   struct i915_page_table *pt,
				   void *data),
			void *data);

	struct i915_vma_ops vma_ops;

	I915_SELFTEST_DECLARE(struct fault_attr fault_attr);
	I915_SELFTEST_DECLARE(bool scrub_64K);
};

/*
 * The Graphics Translation Table is the way in which GEN hardware translates a
 * Graphics Virtual Address into a Physical Address. In addition to the normal
 * collateral associated with any va->pa translations, GEN hardware also has a
 * portion of the GTT which can be mapped by the CPU and remain both coherent
 * and correct (in cases like swizzling). That region is referred to as GMADR
 * in the spec.
 */
struct i915_ggtt {
	struct i915_address_space vm;

	struct io_mapping iomap; /* Mapping to our CPU mappable region */
	struct resource gmadr; /* GMADR resource */
	resource_size_t mappable_end; /* End offset that we can CPU map */

	/** "Graphics Stolen Memory" holds the global PTEs */
	void __iomem *gsm;
	void (*invalidate)(struct i915_ggtt *ggtt);

	/** PPGTT used for aliasing the PPGTT with the GTT */
	struct i915_ppgtt *alias;

	bool do_idle_maps;

	/**
	 * @pte_lost: Are ptes lost on resume?
	 *
	 * Whether the system was recently restored from hibernate and
	 * thus may have lost pte content.
	 */
	bool pte_lost;

	/**
	 * @probed_pte: Probed pte value on suspend. Re-checked on resume.
	 */
	u64 probed_pte;

	int mtrr;

	/** Bit 6 swizzling required for X tiling */
	u32 bit_6_swizzle_x;
	/** Bit 6 swizzling required for Y tiling */
	u32 bit_6_swizzle_y;

	u32 pin_bias;

	unsigned int num_fences;
	struct i915_fence_reg *fence_regs;
	struct list_head fence_list;

	/**
	 * List of all objects in gtt_space, currently mmapped by userspace.
	 * All objects within this list must also be on bound_list.
	 */
	struct list_head userfault_list;

	struct mutex error_mutex;
	struct drm_mm_node error_capture;
	struct drm_mm_node uc_fw;
};

struct i915_ppgtt {
	struct i915_address_space vm;

	struct i915_page_directory *pd;
};

#define i915_is_ggtt(vm) ((vm)->is_ggtt)
#define i915_is_dpt(vm) ((vm)->is_dpt)
#define i915_is_ggtt_or_dpt(vm) (i915_is_ggtt(vm) || i915_is_dpt(vm))

bool intel_vm_no_concurrent_access_wa(struct drm_i915_private *i915);

int __must_check
i915_vm_lock_objects(struct i915_address_space *vm, struct i915_gem_ww_ctx *ww);

static inline bool
i915_vm_is_4lvl(const struct i915_address_space *vm)
{
	/* An address space larger than 4GiB needs the 4-level layout. */
	return (vm->total - 1) >> 32;
}

static inline bool
i915_vm_has_scratch_64K(struct i915_address_space *vm)
{
	return vm->scratch_order == get_order(I915_GTT_PAGE_SIZE_64K);
}

static inline u64 i915_vm_min_alignment(struct i915_address_space *vm,
					enum intel_memory_type type)
{
	/* avoid INTEL_MEMORY_MOCK overflow */
	if ((int)type >= ARRAY_SIZE(vm->min_alignment))
		type = INTEL_MEMORY_SYSTEM;

	return vm->min_alignment[type];
}

static inline u64 i915_vm_obj_min_alignment(struct i915_address_space *vm,
					    struct drm_i915_gem_object *obj)
{
	struct intel_memory_region *mr = READ_ONCE(obj->mm.region);
	enum intel_memory_type type = mr ? mr->type : INTEL_MEMORY_SYSTEM;

	return i915_vm_min_alignment(vm, type);
}

static inline bool
i915_vm_has_cache_coloring(struct i915_address_space *vm)
{
	return i915_is_ggtt(vm) && vm->mm.color_adjust;
}

static inline struct i915_ggtt *
i915_vm_to_ggtt(struct i915_address_space *vm)
{
	BUILD_BUG_ON(offsetof(struct i915_ggtt, vm));
	GEM_BUG_ON(!i915_is_ggtt(vm));
	return container_of(vm, struct i915_ggtt, vm);
}

static inline struct i915_ppgtt *
i915_vm_to_ppgtt(struct i915_address_space *vm)
{
	BUILD_BUG_ON(offsetof(struct i915_ppgtt, vm));
	GEM_BUG_ON(i915_is_ggtt_or_dpt(vm));
	return container_of(vm, struct i915_ppgtt, vm);
}

static inline struct i915_address_space *
i915_vm_get(struct i915_address_space *vm)
{
	kref_get(&vm->ref);
	return vm;
}

static inline struct i915_address_space *
i915_vm_tryget(struct i915_address_space *vm)
{
	return kref_get_unless_zero(&vm->ref) ? vm : NULL;
}

static inline void assert_vm_alive(struct i915_address_space *vm)
{
	GEM_BUG_ON(!kref_read(&vm->ref));
}

/**
 * i915_vm_resv_get - Obtain a reference on the vm's reservation lock
 * @vm: The vm whose reservation lock we want to share.
 *
 * Return: A pointer to the vm's reservation lock.
 */
static inline struct dma_resv *i915_vm_resv_get(struct i915_address_space *vm)
{
	kref_get(&vm->resv_ref);
	return &vm->_resv;
}

void i915_vm_release(struct kref *kref);

void i915_vm_resv_release(struct kref *kref);

static inline void i915_vm_put(struct i915_address_space *vm)
{
	kref_put(&vm->ref, i915_vm_release);
}

/**
 * i915_vm_resv_put - Release a reference on the vm's reservation lock
 * @vm: The vm whose reservation lock we want to release.
 */
static inline void i915_vm_resv_put(struct i915_address_space *vm)
{
	kref_put(&vm->resv_ref, i915_vm_resv_release);
}

void i915_address_space_init(struct i915_address_space *vm, int subclass);
void i915_address_space_fini(struct i915_address_space *vm);

static inline u32 i915_pte_index(u64 address, unsigned int pde_shift)
{
	const u32 mask = NUM_PTE(pde_shift) - 1;

	return (address >> PAGE_SHIFT) & mask;
}

/*
 * Helper to count the number of PTEs within the given length. This count
 * does not cross a page table boundary, so the max value would be
 * GEN6_PTES for GEN6, and GEN8_PTES for GEN8.
 */
static inline u32 i915_pte_count(u64 addr, u64 length, unsigned int pde_shift)
{
	const u64 mask = ~((1ULL << pde_shift) - 1);
	u64 end;

	GEM_BUG_ON(length == 0);
	GEM_BUG_ON(offset_in_page(addr | length));

	end = addr + length;

	if ((addr & mask) != (end & mask))
		return NUM_PTE(pde_shift) - i915_pte_index(addr, pde_shift);

	return i915_pte_index(end, pde_shift) - i915_pte_index(addr, pde_shift);
}
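/*
 * Worked example: with gen6's 22-bit PDE shift, a 16KiB range starting
 * at 0x3ff000 crosses the 4MiB page-table boundary at 0x400000, so only
 * the single remaining PTE before the boundary is counted:
 *
 *	i915_pte_count(0x3ff000, 0x4000, GEN6_PDE_SHIFT) == 1
 *
 * while the same length starting at 0x400000 stays within one table and
 * yields 4.
 */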

static inline u32 i915_pde_index(u64 addr, u32 shift)
{
	return (addr >> shift) & I915_PDE_MASK;
}

static inline struct i915_page_table *
i915_pt_entry(const struct i915_page_directory * const pd,
	      const unsigned short n)
{
	return pd->entry[n];
}

static inline struct i915_page_directory *
i915_pd_entry(const struct i915_page_directory * const pdp,
	      const unsigned short n)
{
	return pdp->entry[n];
}

static inline dma_addr_t
i915_page_dir_dma_addr(const struct i915_ppgtt *ppgtt, const unsigned int n)
{
	struct i915_page_table *pt = ppgtt->pd->entry[n];

	return __px_dma(pt ? px_base(pt) : ppgtt->vm.scratch[ppgtt->vm.top]);
}

void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt,
		unsigned long lmem_pt_obj_flags);
void intel_ggtt_bind_vma(struct i915_address_space *vm,
			 struct i915_vm_pt_stash *stash,
			 struct i915_vma_resource *vma_res,
			 enum i915_cache_level cache_level,
			 u32 flags);
void intel_ggtt_unbind_vma(struct i915_address_space *vm,
			   struct i915_vma_resource *vma_res);

int i915_ggtt_probe_hw(struct drm_i915_private *i915);
int i915_ggtt_init_hw(struct drm_i915_private *i915);
int i915_ggtt_enable_hw(struct drm_i915_private *i915);
void i915_ggtt_enable_guc(struct i915_ggtt *ggtt);
void i915_ggtt_disable_guc(struct i915_ggtt *ggtt);
int i915_init_ggtt(struct drm_i915_private *i915);
void i915_ggtt_driver_release(struct drm_i915_private *i915);
void i915_ggtt_driver_late_release(struct drm_i915_private *i915);

static inline bool i915_ggtt_has_aperture(const struct i915_ggtt *ggtt)
{
	return ggtt->mappable_end > 0;
}

int i915_ppgtt_init_hw(struct intel_gt *gt);

struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt,
				     unsigned long lmem_pt_obj_flags);

void i915_ggtt_suspend_vm(struct i915_address_space *vm);
bool i915_ggtt_resume_vm(struct i915_address_space *vm);
void i915_ggtt_suspend(struct i915_ggtt *gtt);
void i915_ggtt_resume(struct i915_ggtt *ggtt);

/**
 * i915_ggtt_mark_pte_lost - Mark ggtt ptes as lost or clear such a marking
 * @i915: The device private.
 * @val: Whether the ptes should be marked as lost.
 *
 * In some cases pte content is retained across suspend, but typically lost
 * across hibernate. Typically they should be marked as lost on
 * hibernation restore and such marking cleared on suspend.
 */
void i915_ggtt_mark_pte_lost(struct drm_i915_private *i915, bool val);

void
fill_page_dma(struct drm_i915_gem_object *p, const u64 val, unsigned int count);

#define fill_px(px, v) fill_page_dma(px_base(px), (v), PAGE_SIZE / sizeof(u64))
#define fill32_px(px, v) do { \
	u64 v__ = lower_32_bits(v); \
	fill_px((px), v__ << 32 | v__); \
} while (0)
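/*
 * fill32_px() replicates a 32-bit value into both halves of each u64
 * write, filling a table of 32-bit gen6 PTEs with half the stores.
 * A typical (illustrative) use, assuming the scratch object's cached
 * encode value, is resetting a gen6 page table to scratch:
 *
 *	fill32_px(pt, vm->scratch[0]->encode);
 */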

int setup_scratch_page(struct i915_address_space *vm);
void free_scratch(struct i915_address_space *vm);

struct drm_i915_gem_object *alloc_pt_dma(struct i915_address_space *vm, int sz);
struct drm_i915_gem_object *alloc_pt_lmem(struct i915_address_space *vm, int sz);
struct i915_page_table *alloc_pt(struct i915_address_space *vm, int sz);
struct i915_page_directory *alloc_pd(struct i915_address_space *vm);
struct i915_page_directory *__alloc_pd(int npde);

int map_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj);
int map_pt_dma_locked(struct i915_address_space *vm, struct drm_i915_gem_object *obj);

void free_px(struct i915_address_space *vm,
	     struct i915_page_table *pt, int lvl);
#define free_pt(vm, px) free_px(vm, px, 0)
#define free_pd(vm, px) free_px(vm, px_pt(px), 1)

void
__set_pd_entry(struct i915_page_directory * const pd,
	       const unsigned short idx,
	       struct i915_page_table *pt,
	       u64 (*encode)(const dma_addr_t, const enum i915_cache_level));

#define set_pd_entry(pd, idx, to) \
	__set_pd_entry((pd), (idx), px_pt(to), gen8_pde_encode)

void
clear_pd_entry(struct i915_page_directory * const pd,
	       const unsigned short idx,
	       const struct drm_i915_gem_object * const scratch);

bool
release_pd_entry(struct i915_page_directory * const pd,
		 const unsigned short idx,
		 struct i915_page_table * const pt,
		 const struct drm_i915_gem_object * const scratch);
void gen6_ggtt_invalidate(struct i915_ggtt *ggtt);

void ppgtt_bind_vma(struct i915_address_space *vm,
		    struct i915_vm_pt_stash *stash,
		    struct i915_vma_resource *vma_res,
		    enum i915_cache_level cache_level,
		    u32 flags);
void ppgtt_unbind_vma(struct i915_address_space *vm,
		      struct i915_vma_resource *vma_res);

void gtt_write_workarounds(struct intel_gt *gt);

void setup_private_pat(struct intel_uncore *uncore);

int i915_vm_alloc_pt_stash(struct i915_address_space *vm,
			   struct i915_vm_pt_stash *stash,
			   u64 size);
int i915_vm_map_pt_stash(struct i915_address_space *vm,
			 struct i915_vm_pt_stash *stash);
void i915_vm_free_pt_stash(struct i915_address_space *vm,
			   struct i915_vm_pt_stash *stash);

struct i915_vma *
__vm_create_scratch_for_read(struct i915_address_space *vm, unsigned long size);

struct i915_vma *
__vm_create_scratch_for_read_pinned(struct i915_address_space *vm, unsigned long size);

static inline struct sgt_dma {
	struct scatterlist *sg;
	dma_addr_t dma, max;
} sgt_dma(struct i915_vma_resource *vma_res) {
	struct scatterlist *sg = vma_res->bi.pages->sgl;
	dma_addr_t addr = sg_dma_address(sg);

	return (struct sgt_dma){ sg, addr, addr + sg_dma_len(sg) };
}
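/*
 * Sketch of the consumer pattern (modelled on the PTE insertion loops;
 * write_pte() and count are stand-ins): step page by page through the
 * current sg entry, hopping to the next entry when it is exhausted:
 *
 *	struct sgt_dma it = sgt_dma(vma_res);
 *
 *	do {
 *		write_pte(vm, it.dma);
 *		it.dma += I915_GTT_PAGE_SIZE;
 *		if (it.dma >= it.max) {
 *			it.sg = __sg_next(it.sg);
 *			if (!it.sg)
 *				break;
 *			it.dma = sg_dma_address(it.sg);
 *			it.max = it.dma + sg_dma_len(it.sg);
 *		}
 *	} while (--count);
 */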

#endif /* __INTEL_GTT_H__ */