// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include <uapi/drm/i915_drm.h>

#include "intel_memory_region.h"
#include "gem/i915_gem_region.h"
#include "gem/i915_gem_lmem.h"
#include "i915_drv.h"

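/**
 * i915_gem_object_lmem_io_map - Map a range of an lmem object into CPU
 * address space.
 * @obj: The object to map. Must have contiguous backing store, since the
 * mapping offset is computed from a single dma address.
 * @n: The index of the first page of the object to map.
 * @size: The size in bytes of the mapping.
 *
 * Return: A write-combined __iomem pointer into the memory region's I/O
 * mapping, at the offset of page @n of the object's backing store.
 */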
void __iomem *
i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj,
			    unsigned long n,
			    unsigned long size)
{
	resource_size_t offset;

	GEM_BUG_ON(!i915_gem_object_is_contiguous(obj));

	offset = i915_gem_object_get_dma_address(obj, n);
	offset -= obj->mm.region->region.start;

	return io_mapping_map_wc(&obj->mm.region->iomap, offset, size);
}

/**
 * i915_gem_object_is_lmem - Whether the object is resident in lmem
 * @obj: The object to check.
 *
 * Even if an object is allowed to migrate and change memory region,
 * this function checks whether it will always be present in lmem when
 * valid, *or*, if that's not the case, whether it's currently resident in
 * lmem. For migratable and evictable objects, the latter only makes sense
 * when the object is locked.
 *
 * Return: Whether the object is migratable and currently resident in lmem,
 * or is not migratable and will be present in lmem when valid.
 */
bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj)
{
	struct intel_memory_region *mr = READ_ONCE(obj->mm.region);

#ifdef CONFIG_LOCKDEP
	if (i915_gem_object_migratable(obj) &&
	    i915_gem_object_evictable(obj))
		assert_object_held(obj);
#endif
	return mr && (mr->type == INTEL_MEMORY_LOCAL ||
		      mr->type == INTEL_MEMORY_STOLEN_LOCAL);
}
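
/*
 * Illustrative sketch, not part of the original file: for a migratable and
 * evictable object, i915_gem_object_is_lmem() is only stable while the
 * object lock is held, so a caller would typically bracket the check with
 * the lock. The helper name is hypothetical.
 */
static bool __maybe_unused
example_object_in_lmem_locked(struct drm_i915_gem_object *obj)
{
	bool lmem;

	i915_gem_object_lock(obj, NULL); /* satisfies assert_object_held() */
	lmem = i915_gem_object_is_lmem(obj);
	i915_gem_object_unlock(obj);

	return lmem;
}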

/**
 * __i915_gem_object_is_lmem - Whether the object is resident in lmem while
 * in the fence signaling critical path.
 * @obj: The object to check.
 *
 * This function is intended to be called from within the fence signaling
 * path, where the fence, or a pin, keeps the object from being migrated,
 * for example during gpu reset.
 *
 * Return: Whether the object is resident in lmem.
 */
bool __i915_gem_object_is_lmem(struct drm_i915_gem_object *obj)
{
	struct intel_memory_region *mr = READ_ONCE(obj->mm.region);

#ifdef CONFIG_LOCKDEP
	GEM_WARN_ON(dma_resv_test_signaled(obj->base.resv, DMA_RESV_USAGE_BOOKKEEP) &&
		    i915_gem_object_evictable(obj));
#endif
	return mr && (mr->type == INTEL_MEMORY_LOCAL ||
		      mr->type == INTEL_MEMORY_STOLEN_LOCAL);
}
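
/*
 * Illustrative sketch, not part of the original file: with the pages pinned
 * the object is no longer evictable, so mm.region is stable and the unlocked
 * variant above can be used without upsetting lockdep. The helper name is
 * hypothetical.
 */
static bool __maybe_unused
example_pinned_object_in_lmem(struct drm_i915_gem_object *obj)
{
	bool lmem;

	if (i915_gem_object_pin_pages_unlocked(obj))
		return false;

	lmem = __i915_gem_object_is_lmem(obj);
	i915_gem_object_unpin_pages(obj);

	return lmem;
}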

/**
 * __i915_gem_object_create_lmem_with_ps - Create lmem object and force the
 * minimum page size for the backing pages.
 * @i915: The i915 instance.
 * @size: The size in bytes for the object. Note that the size is rounded
 * up depending on the @page_size. The final object size can be fished out
 * from the drm GEM object.
 * @page_size: The requested minimum page size in bytes for this object. This
 * is useful if we need something bigger than the region's min_page_size due
 * to some hw restriction, or in some very specialised cases where it needs
 * to be smaller, where the internal fragmentation cost of rounding up the
 * object size is too great.
 * @flags: The optional BO allocation flags.
 *
 * Note that this interface assumes you know what you are doing when forcing
 * the @page_size. If this is smaller than the region's min_page_size then the
 * object can never be inserted into any GTT; attempting to do so might lead
 * to undefined behaviour.
 *
 * Return: The object pointer, which might be an ERR_PTR in the case of failure.
 */
struct drm_i915_gem_object *
__i915_gem_object_create_lmem_with_ps(struct drm_i915_private *i915,
				      resource_size_t size,
				      resource_size_t page_size,
				      unsigned int flags)
{
	return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_LMEM_0],
					     size, page_size, flags);
}
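
/*
 * Illustrative sketch, not part of the original file: forcing a 64K minimum
 * page size, e.g. for hw where lmem PTEs larger than the default are
 * required. The helper name and the choice of SZ_64K are hypothetical.
 */
static struct drm_i915_gem_object * __maybe_unused
example_create_lmem_64k(struct drm_i915_private *i915, resource_size_t size)
{
	return __i915_gem_object_create_lmem_with_ps(i915, size, SZ_64K, 0);
}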
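/**
 * i915_gem_object_create_lmem_from_data - Create an lmem object initialised
 * with the given data.
 * @i915: The i915 instance.
 * @data: The data to copy into the object's backing store.
 * @size: The size in bytes of @data. The object itself is rounded up to
 * PAGE_SIZE.
 *
 * The object is created contiguous, mapped write-combined for the copy, and
 * the mapping is flushed and released again before returning.
 *
 * Return: The object pointer, which might be an ERR_PTR in the case of failure.
 */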
struct drm_i915_gem_object *
i915_gem_object_create_lmem_from_data(struct drm_i915_private *i915,
				      const void *data, size_t size)
{
	struct drm_i915_gem_object *obj;
	void *map;

	obj = i915_gem_object_create_lmem(i915,
					  round_up(size, PAGE_SIZE),
					  I915_BO_ALLOC_CONTIGUOUS);
	if (IS_ERR(obj))
		return obj;

	map = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
	if (IS_ERR(map)) {
		i915_gem_object_put(obj);
		return map;
	}

	memcpy(map, data, size);

	i915_gem_object_flush_map(obj);
	__i915_gem_object_release_map(obj);

	return obj;
}

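/**
 * i915_gem_object_create_lmem - Create an object backed by lmem.
 * @i915: The i915 instance.
 * @size: The size in bytes for the object.
 * @flags: The optional BO allocation flags.
 *
 * Shorthand for __i915_gem_object_create_lmem_with_ps() with a zero
 * @page_size, i.e. using the region's default minimum page size.
 *
 * Return: The object pointer, which might be an ERR_PTR in the case of failure.
 */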
struct drm_i915_gem_object *
i915_gem_object_create_lmem(struct drm_i915_private *i915,
			    resource_size_t size,
			    unsigned int flags)
{
	return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_LMEM_0],
					     size, 0, flags);
}