// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include <linux/list.h>
#include <linux/list_sort.h>
#include <linux/llist.h>

#include "i915_drv.h"
#include "intel_engine.h"
#include "intel_engine_user.h"
#include "intel_gt.h"
#include "uc/intel_guc_submission.h"

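/*
 * Look up the engine exposed to userspace as (class, instance) by
 * walking the uabi_engines rbtree, which is kept sorted by uabi class
 * and then by instance.
 */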
struct intel_engine_cs *
intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance)
{
        struct rb_node *p = i915->uabi_engines.rb_node;

        while (p) {
                struct intel_engine_cs *it =
                        rb_entry(p, typeof(*it), uabi_node);

                if (class < it->uabi_class)
                        p = p->rb_left;
                else if (class > it->uabi_class ||
                         instance > it->uabi_instance)
                        p = p->rb_right;
                else if (instance < it->uabi_instance)
                        p = p->rb_left;
                else
                        return it;
        }

        return NULL;
}

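/*
 * Engines are published to userspace in two phases: as each engine is
 * initialised it is pushed onto a lockless llist, and later
 * intel_engines_driver_register() drains that list, sorts it and builds
 * the uabi_engines rbtree. The llist and the rbtree reuse the same
 * storage in the engine and in i915, hence the casts below.
 */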
void intel_engine_add_user(struct intel_engine_cs *engine)
{
        llist_add((struct llist_node *)&engine->uabi_node,
                  (struct llist_head *)&engine->i915->uabi_engines);
}

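/* Map the internal engine class onto the class reported to userspace. */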
static const u8 uabi_classes[] = {
        [RENDER_CLASS] = I915_ENGINE_CLASS_RENDER,
        [COPY_ENGINE_CLASS] = I915_ENGINE_CLASS_COPY,
        [VIDEO_DECODE_CLASS] = I915_ENGINE_CLASS_VIDEO,
        [VIDEO_ENHANCEMENT_CLASS] = I915_ENGINE_CLASS_VIDEO_ENHANCE,
        [COMPUTE_CLASS] = I915_ENGINE_CLASS_COMPUTE,
};

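/* list_sort() comparator: order engines by uabi class, then by instance. */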
static int engine_cmp(void *priv, const struct list_head *A,
                      const struct list_head *B)
{
        const struct intel_engine_cs *a =
                container_of((struct rb_node *)A, typeof(*a), uabi_node);
        const struct intel_engine_cs *b =
                container_of((struct rb_node *)B, typeof(*b), uabi_node);

        if (uabi_classes[a->class] < uabi_classes[b->class])
                return -1;
        if (uabi_classes[a->class] > uabi_classes[b->class])
                return 1;

        if (a->instance < b->instance)
                return -1;
        if (a->instance > b->instance)
                return 1;

        return 0;
}

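/* Atomically claim all engines published so far via intel_engine_add_user(). */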
static struct llist_node *get_engines(struct drm_i915_private *i915)
{
        return llist_del_all((struct llist_head *)&i915->uabi_engines);
}

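/*
 * Move the engines from the construction llist onto a regular list and
 * sort them into (class, instance) order ready for rbtree insertion.
 */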
static void sort_engines(struct drm_i915_private *i915,
                         struct list_head *engines)
{
        struct llist_node *pos, *next;

        llist_for_each_safe(pos, next, get_engines(i915)) {
                struct intel_engine_cs *engine =
                        container_of((struct rb_node *)pos, typeof(*engine),
                                     uabi_node);
                list_add((struct list_head *)&engine->uabi_node, engines);
        }
        list_sort(NULL, engines, engine_cmp);
}

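/*
 * Derive the scheduler capabilities reported to userspace. A capability
 * is advertised only if every uabi engine agrees; anything disabled by
 * one engine is masked out for all.
 */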
static void set_scheduler_caps(struct drm_i915_private *i915)
{
        static const struct {
                u8 engine;
                u8 sched;
        } map[] = {
#define MAP(x, y) { ilog2(I915_ENGINE_##x), ilog2(I915_SCHEDULER_CAP_##y) }
                MAP(HAS_PREEMPTION, PREEMPTION),
                MAP(HAS_SEMAPHORES, SEMAPHORES),
                MAP(SUPPORTS_STATS, ENGINE_BUSY_STATS),
#undef MAP
        };
        struct intel_engine_cs *engine;
        u32 enabled, disabled;

        enabled = 0;
        disabled = 0;
        for_each_uabi_engine(engine, i915) { /* all engines must agree! */
                int i;

                if (engine->sched_engine->schedule)
                        enabled |= (I915_SCHEDULER_CAP_ENABLED |
                                    I915_SCHEDULER_CAP_PRIORITY);
                else
                        disabled |= (I915_SCHEDULER_CAP_ENABLED |
                                     I915_SCHEDULER_CAP_PRIORITY);

                if (intel_uc_uses_guc_submission(&to_gt(i915)->uc))
                        enabled |= I915_SCHEDULER_CAP_STATIC_PRIORITY_MAP;

                for (i = 0; i < ARRAY_SIZE(map); i++) {
                        if (engine->flags & BIT(map[i].engine))
                                enabled |= BIT(map[i].sched);
                        else
                                disabled |= BIT(map[i].sched);
                }
        }

        i915->caps.scheduler = enabled & ~disabled;
        if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_ENABLED))
                i915->caps.scheduler = 0;
}

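/* Return the short user facing name prefix ("rcs", "bcs", ...) for a class. */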
const char *intel_engine_class_repr(u8 class)
{
        static const char * const uabi_names[] = {
                [RENDER_CLASS] = "rcs",
                [COPY_ENGINE_CLASS] = "bcs",
                [VIDEO_DECODE_CLASS] = "vcs",
                [VIDEO_ENHANCEMENT_CLASS] = "vecs",
                [COMPUTE_CLASS] = "ccs",
        };

        if (class >= ARRAY_SIZE(uabi_names) || !uabi_names[class])
                return "xxx";

        return uabi_names[class];
}

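/* Tracks the legacy execbuf ring index as engines are walked in order. */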
struct legacy_ring {
        struct intel_gt *gt;
        u8 class;
        u8 instance;
};

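/* Translate (class, instance) into the legacy global engine index. */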
static int legacy_ring_idx(const struct legacy_ring *ring)
{
        static const struct {
                u8 base, max;
        } map[] = {
                [RENDER_CLASS] = { RCS0, 1 },
                [COPY_ENGINE_CLASS] = { BCS0, 1 },
                [VIDEO_DECODE_CLASS] = { VCS0, I915_MAX_VCS },
                [VIDEO_ENHANCEMENT_CLASS] = { VECS0, I915_MAX_VECS },
                [COMPUTE_CLASS] = { CCS0, I915_MAX_CCS },
        };

        if (GEM_DEBUG_WARN_ON(ring->class >= ARRAY_SIZE(map)))
                return INVALID_ENGINE;

        if (GEM_DEBUG_WARN_ON(ring->instance >= map[ring->class].max))
                return INVALID_ENGINE;

        return map[ring->class].base + ring->instance;
}

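/*
 * Assign engine->legacy_idx, restarting the instance count whenever the
 * walk moves onto a new gt or engine class.
 */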
static void add_legacy_ring(struct legacy_ring *ring,
                            struct intel_engine_cs *engine)
{
        if (engine->gt != ring->gt || engine->class != ring->class) {
                ring->gt = engine->gt;
                ring->class = engine->class;
                ring->instance = 0;
        }

        engine->legacy_idx = legacy_ring_idx(ring);
        if (engine->legacy_idx != INVALID_ENGINE)
                ring->instance++;
}

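/*
 * Publish the engines to userspace: sort them, assign uabi class,
 * instance and name, link them into the uabi_engines rbtree and derive
 * the scheduler capabilities. With CONFIG_DRM_I915_SELFTESTS and
 * CONFIG_DRM_I915_DEBUG_GEM enabled, the resulting mapping is also
 * sanity checked.
 */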
void intel_engines_driver_register(struct drm_i915_private *i915)
{
        struct legacy_ring ring = {};
        struct list_head *it, *next;
        struct rb_node **p, *prev;
        LIST_HEAD(engines);

        sort_engines(i915, &engines);

        prev = NULL;
        p = &i915->uabi_engines.rb_node;
        list_for_each_safe(it, next, &engines) {
                struct intel_engine_cs *engine =
                        container_of((struct rb_node *)it, typeof(*engine),
                                     uabi_node);
                char old[sizeof(engine->name)];

                if (intel_gt_has_unrecoverable_error(engine->gt))
                        continue; /* ignore incomplete engines */

                GEM_BUG_ON(engine->class >= ARRAY_SIZE(uabi_classes));
                engine->uabi_class = uabi_classes[engine->class];

                GEM_BUG_ON(engine->uabi_class >=
                           ARRAY_SIZE(i915->engine_uabi_class_count));
                engine->uabi_instance =
                        i915->engine_uabi_class_count[engine->uabi_class]++;

                /* Replace the internal name with the final user facing name */
                memcpy(old, engine->name, sizeof(engine->name));
                scnprintf(engine->name, sizeof(engine->name), "%s%u",
                          intel_engine_class_repr(engine->class),
                          engine->uabi_instance);
                DRM_DEBUG_DRIVER("renamed %s to %s\n", old, engine->name);

                rb_link_node(&engine->uabi_node, prev, p);
                rb_insert_color(&engine->uabi_node, &i915->uabi_engines);

                GEM_BUG_ON(intel_engine_lookup_user(i915,
                                                    engine->uabi_class,
                                                    engine->uabi_instance) != engine);

                /* Fix up the mapping to match default execbuf::user_map[] */
                add_legacy_ring(&ring, engine);

                prev = &engine->uabi_node;
                p = &prev->rb_right;
        }

        if (IS_ENABLED(CONFIG_DRM_I915_SELFTESTS) &&
            IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) {
                struct intel_engine_cs *engine;
                unsigned int isolation;
                int class, inst;
                int errors = 0;

                for (class = 0; class < ARRAY_SIZE(i915->engine_uabi_class_count); class++) {
                        for (inst = 0; inst < i915->engine_uabi_class_count[class]; inst++) {
                                engine = intel_engine_lookup_user(i915,
                                                                  class, inst);
                                if (!engine) {
                                        pr_err("UABI engine not found for { class:%d, instance:%d }\n",
                                               class, inst);
                                        errors++;
                                        continue;
                                }

                                if (engine->uabi_class != class ||
                                    engine->uabi_instance != inst) {
                                        pr_err("Wrong UABI engine:%s { class:%d, instance:%d } found for { class:%d, instance:%d }\n",
                                               engine->name,
                                               engine->uabi_class,
                                               engine->uabi_instance,
                                               class, inst);
                                        errors++;
                                        continue;
                                }
                        }
                }

                /*
                 * Make sure that classes with multiple engine instances all
                 * share the same basic configuration.
                 */
                isolation = intel_engines_has_context_isolation(i915);
                for_each_uabi_engine(engine, i915) {
                        unsigned int bit = BIT(engine->uabi_class);
                        unsigned int expected = engine->default_state ? bit : 0;

                        if ((isolation & bit) != expected) {
                                pr_err("mismatching default context state for class %d on engine %s\n",
                                       engine->uabi_class, engine->name);
                                errors++;
                        }
                }

                if (drm_WARN(&i915->drm, errors,
                             "Invalid UABI engine mapping found"))
                        i915->uabi_engines = RB_ROOT;
        }

        set_scheduler_caps(i915);
}

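/*
 * Return a bitmask of uabi engine classes for which a default context
 * state (engine->default_state) has been recorded.
 */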
unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915)
{
        struct intel_engine_cs *engine;
        unsigned int which;

        which = 0;
        for_each_uabi_engine(engine, i915)
                if (engine->default_state)
                        which |= BIT(engine->uabi_class);

        return which;
}