1 // SPDX-License-Identifier: MIT
2 /*
3 * Copyright © 2016-2019 Intel Corporation
4 */
5
6 #include <linux/bitfield.h>
7 #include <linux/firmware.h>
8 #include <linux/highmem.h>
9
10 #include <drm/drm_cache.h>
11 #include <drm/drm_print.h>
12
13 #include "gem/i915_gem_lmem.h"
14 #include "gt/intel_gt.h"
15 #include "gt/intel_gt_print.h"
16 #include "intel_gsc_binary_headers.h"
17 #include "intel_gsc_fw.h"
18 #include "intel_uc_fw.h"
19 #include "intel_uc_fw_abi.h"
20 #include "i915_drv.h"
21 #include "i915_reg.h"
22
/*
 * Unexpected firmware situations are logged as probe errors on debug
 * builds (so they are hard to miss) but only as notices on production
 * builds.
 */
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
#define UNEXPECTED	gt_probe_error
#else
#define UNEXPECTED	gt_notice
#endif
28
29 static inline struct intel_gt *
____uc_fw_to_gt(struct intel_uc_fw * uc_fw,enum intel_uc_fw_type type)30 ____uc_fw_to_gt(struct intel_uc_fw *uc_fw, enum intel_uc_fw_type type)
31 {
32 GEM_BUG_ON(type >= INTEL_UC_FW_NUM_TYPES);
33
34 switch (type) {
35 case INTEL_UC_FW_TYPE_GUC:
36 return container_of(uc_fw, struct intel_gt, uc.guc.fw);
37 case INTEL_UC_FW_TYPE_HUC:
38 return container_of(uc_fw, struct intel_gt, uc.huc.fw);
39 case INTEL_UC_FW_TYPE_GSC:
40 return container_of(uc_fw, struct intel_gt, uc.gsc.fw);
41 }
42
43 return NULL;
44 }
45
/* As ____uc_fw_to_gt(), but takes the type from an already-initialized uc_fw. */
static inline struct intel_gt *__uc_fw_to_gt(struct intel_uc_fw *uc_fw)
{
	/* uc_fw->type is only valid once intel_uc_fw_init_early() has run */
	GEM_BUG_ON(uc_fw->status == INTEL_UC_FIRMWARE_UNINITIALIZED);
	return ____uc_fw_to_gt(uc_fw, uc_fw->type);
}
51
52 #ifdef CONFIG_DRM_I915_DEBUG_GUC
intel_uc_fw_change_status(struct intel_uc_fw * uc_fw,enum intel_uc_fw_status status)53 void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
54 enum intel_uc_fw_status status)
55 {
56 uc_fw->__status = status;
57 gt_dbg(__uc_fw_to_gt(uc_fw), "%s firmware -> %s\n",
58 intel_uc_fw_type_repr(uc_fw->type),
59 status == INTEL_UC_FIRMWARE_SELECTED ?
60 uc_fw->file_selected.path : intel_uc_fw_status_repr(status));
61 }
62 #endif
63
64 /*
65 * List of required GuC and HuC binaries per-platform.
66 * Must be ordered based on platform + revid, from newer to older.
67 *
68 * Note that RKL and ADL-S have the same GuC/HuC device ID's and use the same
69 * firmware as TGL.
70 *
71 * Version numbers:
 * Originally, the driver required an exact match major/minor/patch firmware
73 * file and only supported that one version for any given platform. However,
74 * the new direction from upstream is to be backwards compatible with all
75 * prior releases and to be as flexible as possible as to what firmware is
76 * loaded.
77 *
78 * For GuC, the major version number signifies a backwards breaking API change.
79 * So, new format GuC firmware files are labelled by their major version only.
80 * For HuC, there is no KMD interaction, hence no version matching requirement.
81 * So, new format HuC firmware files have no version number at all.
82 *
83 * All of which means that the table below must keep all old format files with
84 * full three point version number. But newer files have reduced requirements.
85 * Having said that, the driver still needs to track the minor version number
86 * for GuC at least. As it is useful to report to the user that they are not
87 * running with a recent enough version for all KMD supported features,
88 * security fixes, etc. to be enabled.
89 */
/*
 * GuC firmware table. guc_maj() entries use the new major-only file naming;
 * guc_mmp() entries are legacy blobs named with the full major.minor.patch.
 */
#define INTEL_GUC_FIRMWARE_DEFS(fw_def, guc_maj, guc_mmp) \
	fw_def(METEORLAKE,   0, guc_maj(mtl,  70, 6, 6)) \
	fw_def(DG2,          0, guc_maj(dg2,  70, 5, 1)) \
	fw_def(ALDERLAKE_P,  0, guc_maj(adlp, 70, 5, 1)) \
	fw_def(ALDERLAKE_P,  0, guc_mmp(adlp, 70, 1, 1)) \
	fw_def(ALDERLAKE_P,  0, guc_mmp(adlp, 69, 0, 3)) \
	fw_def(ALDERLAKE_S,  0, guc_maj(tgl,  70, 5, 1)) \
	fw_def(ALDERLAKE_S,  0, guc_mmp(tgl,  70, 1, 1)) \
	fw_def(ALDERLAKE_S,  0, guc_mmp(tgl,  69, 0, 3)) \
	fw_def(DG1,          0, guc_maj(dg1,  70, 5, 1)) \
	fw_def(ROCKETLAKE,   0, guc_mmp(tgl,  70, 1, 1)) \
	fw_def(TIGERLAKE,    0, guc_mmp(tgl,  70, 1, 1)) \
	fw_def(JASPERLAKE,   0, guc_mmp(ehl,  70, 1, 1)) \
	fw_def(ELKHARTLAKE,  0, guc_mmp(ehl,  70, 1, 1)) \
	fw_def(ICELAKE,      0, guc_mmp(icl,  70, 1, 1)) \
	fw_def(COMETLAKE,    5, guc_mmp(cml,  70, 1, 1)) \
	fw_def(COMETLAKE,    0, guc_mmp(kbl,  70, 1, 1)) \
	fw_def(COFFEELAKE,   0, guc_mmp(kbl,  70, 1, 1)) \
	fw_def(GEMINILAKE,   0, guc_mmp(glk,  70, 1, 1)) \
	fw_def(KABYLAKE,     0, guc_mmp(kbl,  70, 1, 1)) \
	fw_def(BROXTON,      0, guc_mmp(bxt,  70, 1, 1)) \
	fw_def(SKYLAKE,      0, guc_mmp(skl,  70, 1, 1))
112
/*
 * HuC firmware table. huc_raw() entries are new versionless files,
 * huc_gsc() entries are versionless files wrapped in GSC binary headers,
 * huc_mmp() entries are legacy blobs named with the full major.minor.patch.
 */
#define INTEL_HUC_FIRMWARE_DEFS(fw_def, huc_raw, huc_mmp, huc_gsc) \
	fw_def(METEORLAKE,   0, huc_gsc(mtl)) \
	fw_def(DG2,          0, huc_gsc(dg2)) \
	fw_def(ALDERLAKE_P,  0, huc_raw(tgl)) \
	fw_def(ALDERLAKE_P,  0, huc_mmp(tgl, 7, 9, 3)) \
	fw_def(ALDERLAKE_S,  0, huc_raw(tgl)) \
	fw_def(ALDERLAKE_S,  0, huc_mmp(tgl, 7, 9, 3)) \
	fw_def(DG1,          0, huc_raw(dg1)) \
	fw_def(ROCKETLAKE,   0, huc_mmp(tgl, 7, 9, 3)) \
	fw_def(TIGERLAKE,    0, huc_mmp(tgl, 7, 9, 3)) \
	fw_def(JASPERLAKE,   0, huc_mmp(ehl, 9, 0, 0)) \
	fw_def(ELKHARTLAKE,  0, huc_mmp(ehl, 9, 0, 0)) \
	fw_def(ICELAKE,      0, huc_mmp(icl, 9, 0, 0)) \
	fw_def(COMETLAKE,    5, huc_mmp(cml, 4, 0, 0)) \
	fw_def(COMETLAKE,    0, huc_mmp(kbl, 4, 0, 0)) \
	fw_def(COFFEELAKE,   0, huc_mmp(kbl, 4, 0, 0)) \
	fw_def(GEMINILAKE,   0, huc_mmp(glk, 4, 0, 0)) \
	fw_def(KABYLAKE,     0, huc_mmp(kbl, 4, 0, 0)) \
	fw_def(BROXTON,      0, huc_mmp(bxt, 2, 0, 0)) \
	fw_def(SKYLAKE,      0, huc_mmp(skl, 2, 0, 0))
133
/*
 * Set of macros for producing a list of filenames from the above table.
 */
/* "i915/<prefix>_<name>.bin" - versionless file name */
#define __MAKE_UC_FW_PATH_BLANK(prefix_, name_) \
	"i915/" \
	__stringify(prefix_) "_" name_ ".bin"

/* "i915/<prefix>_<name>_<major>.bin" - major-version-only file name */
#define __MAKE_UC_FW_PATH_MAJOR(prefix_, name_, major_) \
	"i915/" \
	__stringify(prefix_) "_" name_ "_" \
	__stringify(major_) ".bin"

/* "i915/<prefix>_<name>_<major>.<minor>.<patch>.bin" - legacy file name */
#define __MAKE_UC_FW_PATH_MMP(prefix_, name_, major_, minor_, patch_) \
	"i915/" \
	__stringify(prefix_) "_" name_ "_" \
	__stringify(major_) "." \
	__stringify(minor_) "." \
	__stringify(patch_) ".bin"

/* Minor for internal driver use, not part of file name */
#define MAKE_GUC_FW_PATH_MAJOR(prefix_, major_, minor_, patch_) \
	__MAKE_UC_FW_PATH_MAJOR(prefix_, "guc", major_)

#define MAKE_GUC_FW_PATH_MMP(prefix_, major_, minor_, patch_) \
	__MAKE_UC_FW_PATH_MMP(prefix_, "guc", major_, minor_, patch_)

#define MAKE_HUC_FW_PATH_BLANK(prefix_) \
	__MAKE_UC_FW_PATH_BLANK(prefix_, "huc")

#define MAKE_HUC_FW_PATH_GSC(prefix_) \
	__MAKE_UC_FW_PATH_BLANK(prefix_, "huc_gsc")

#define MAKE_HUC_FW_PATH_MMP(prefix_, major_, minor_, patch_) \
	__MAKE_UC_FW_PATH_MMP(prefix_, "huc", major_, minor_, patch_)
168
/*
 * All blobs need to be declared via MODULE_FIRMWARE().
 * This first expansion of the table macros is solely to provide
 * that declaration.
 */
#define INTEL_UC_MODULE_FW(platform_, revid_, uc_) \
	MODULE_FIRMWARE(uc_);

/* Expand both tables so every blob path appears in the module info */
INTEL_GUC_FIRMWARE_DEFS(INTEL_UC_MODULE_FW, MAKE_GUC_FW_PATH_MAJOR, MAKE_GUC_FW_PATH_MMP)
INTEL_HUC_FIRMWARE_DEFS(INTEL_UC_MODULE_FW, MAKE_HUC_FW_PATH_BLANK, MAKE_HUC_FW_PATH_MMP, MAKE_HUC_FW_PATH_GSC)
179
180 /*
181 * The next expansion of the table macros (in __uc_fw_auto_select below) provides
182 * actual data structures with both the filename and the version information.
 * These structure arrays are then iterated over to find the list of suitable
 * files for the current platform, and to then attempt to load those files, in
 * the order listed, until one is successfully found.
186 */
/* Metadata for one firmware file from the tables above. */
struct __packed uc_fw_blob {
	const char *path;	/* file name requested from the firmware loader */
	bool legacy;		/* old-style blob with full x.y.z in its name */
	u8 major;		/* version advertised by the table entry */
	u8 minor;
	u8 patch;
	bool has_gsc_headers;	/* blob is wrapped in GSC binary headers */
};
195
/* Version/path fields common to new and legacy blob entries */
#define UC_FW_BLOB_BASE(major_, minor_, patch_, path_) \
	.major = major_, \
	.minor = minor_, \
	.patch = patch_, \
	.path = path_,

/* New-format blob; gsc_ selects whether it carries GSC binary headers */
#define UC_FW_BLOB_NEW(major_, minor_, patch_, gsc_, path_) \
	{ UC_FW_BLOB_BASE(major_, minor_, patch_, path_) \
	  .legacy = false, .has_gsc_headers = gsc_ }

/* Legacy blob with the full version encoded in the file name */
#define UC_FW_BLOB_OLD(major_, minor_, patch_, path_) \
	{ UC_FW_BLOB_BASE(major_, minor_, patch_, path_) \
	  .legacy = true }

#define GUC_FW_BLOB(prefix_, major_, minor_, patch_) \
	UC_FW_BLOB_NEW(major_, minor_, patch_, false, \
		       MAKE_GUC_FW_PATH_MAJOR(prefix_, major_, minor_, patch_))

#define GUC_FW_BLOB_MMP(prefix_, major_, minor_, patch_) \
	UC_FW_BLOB_OLD(major_, minor_, patch_, \
		       MAKE_GUC_FW_PATH_MMP(prefix_, major_, minor_, patch_))

/* New-format HuC blobs carry no version in the name at all */
#define HUC_FW_BLOB(prefix_) \
	UC_FW_BLOB_NEW(0, 0, 0, false, MAKE_HUC_FW_PATH_BLANK(prefix_))

#define HUC_FW_BLOB_MMP(prefix_, major_, minor_, patch_) \
	UC_FW_BLOB_OLD(major_, minor_, patch_, \
		       MAKE_HUC_FW_PATH_MMP(prefix_, major_, minor_, patch_))

#define HUC_FW_BLOB_GSC(prefix_) \
	UC_FW_BLOB_NEW(0, 0, 0, true, MAKE_HUC_FW_PATH_GSC(prefix_))
227
/* One firmware-table row: the blob to use for platform @p from rev @rev on */
struct __packed uc_fw_platform_requirement {
	enum intel_platform p;
	u8 rev; /* first platform rev using this FW */
	const struct uc_fw_blob blob;
};

/* Expand a table row into a uc_fw_platform_requirement initializer */
#define MAKE_FW_LIST(platform_, revid_, uc_) \
{ \
	.p = INTEL_##platform_, \
	.rev = revid_, \
	.blob = uc_, \
},

/* Per-uC-type view of the expanded firmware table */
struct fw_blobs_by_type {
	const struct uc_fw_platform_requirement *blobs;
	u32 count;
};
245
/* Expand the tables into per-type blob arrays, indexed by intel_uc_fw_type */
static const struct uc_fw_platform_requirement blobs_guc[] = {
	INTEL_GUC_FIRMWARE_DEFS(MAKE_FW_LIST, GUC_FW_BLOB, GUC_FW_BLOB_MMP)
};

static const struct uc_fw_platform_requirement blobs_huc[] = {
	INTEL_HUC_FIRMWARE_DEFS(MAKE_FW_LIST, HUC_FW_BLOB, HUC_FW_BLOB_MMP, HUC_FW_BLOB_GSC)
};

/* No GSC entry on purpose; __uc_fw_auto_select() skips GSC for the same reason */
static const struct fw_blobs_by_type blobs_all[INTEL_UC_FW_NUM_TYPES] = {
	[INTEL_UC_FW_TYPE_GUC] = { blobs_guc, ARRAY_SIZE(blobs_guc) },
	[INTEL_UC_FW_TYPE_HUC] = { blobs_huc, ARRAY_SIZE(blobs_huc) },
};
258
/*
 * Pick the firmware blob to load for @uc_fw based on platform and revision.
 *
 * The blob table is ordered from newer to older, so the first matching entry
 * is the preferred file. If a previously selected blob failed to load,
 * file_selected.path is still set on entry; in that case the search skips
 * entries until it finds the old selection again and then continues from the
 * following entry, falling back to the next (older) suitable blob.
 */
static void
__uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
{
	const struct uc_fw_platform_requirement *fw_blobs;
	enum intel_platform p = INTEL_INFO(i915)->platform;
	u32 fw_count;
	u8 rev = INTEL_REVID(i915);
	int i;
	bool found;

	/*
	 * GSC FW support is still not fully in place, so we're not defining
	 * the FW blob yet because we don't want the driver to attempt to load
	 * it until we're ready for it.
	 */
	if (uc_fw->type == INTEL_UC_FW_TYPE_GSC)
		return;

	/*
	 * The only difference between the ADL GuC FWs is the HWConfig support.
	 * ADL-N does not support HWConfig, so we should use the same binary as
	 * ADL-S, otherwise the GuC might attempt to fetch a config table that
	 * does not exist.
	 */
	if (IS_ALDERLAKE_P_N(i915))
		p = INTEL_ALDERLAKE_S;

	GEM_BUG_ON(uc_fw->type >= ARRAY_SIZE(blobs_all));
	fw_blobs = blobs_all[uc_fw->type].blobs;
	fw_count = blobs_all[uc_fw->type].count;

	found = false;
	/* the table is sorted by platform, so stop once we've gone past ours */
	for (i = 0; i < fw_count && p <= fw_blobs[i].p; i++) {
		const struct uc_fw_blob *blob = &fw_blobs[i].blob;

		if (p != fw_blobs[i].p)
			continue;

		/* skip entries meant for newer steppings of this platform */
		if (rev < fw_blobs[i].rev)
			continue;

		if (uc_fw->file_selected.path) {
			/*
			 * Continuing an earlier search after a found blob failed to load.
			 * Once the previously chosen path has been found, clear it out
			 * and let the search continue from there.
			 */
			if (uc_fw->file_selected.path == blob->path)
				uc_fw->file_selected.path = NULL;

			continue;
		}

		uc_fw->file_selected.path = blob->path;
		uc_fw->file_wanted.path = blob->path;
		uc_fw->file_wanted.ver.major = blob->major;
		uc_fw->file_wanted.ver.minor = blob->minor;
		uc_fw->file_wanted.ver.patch = blob->patch;
		uc_fw->has_gsc_headers = blob->has_gsc_headers;
		found = true;
		break;
	}

	if (!found && uc_fw->file_selected.path) {
		/* Failed to find a match for the last attempt?! */
		uc_fw->file_selected.path = NULL;
	}
}
327
validate_fw_table_type(struct drm_i915_private * i915,enum intel_uc_fw_type type)328 static bool validate_fw_table_type(struct drm_i915_private *i915, enum intel_uc_fw_type type)
329 {
330 const struct uc_fw_platform_requirement *fw_blobs;
331 u32 fw_count;
332 int i, j;
333
334 if (type >= ARRAY_SIZE(blobs_all)) {
335 drm_err(&i915->drm, "No blob array for %s\n", intel_uc_fw_type_repr(type));
336 return false;
337 }
338
339 fw_blobs = blobs_all[type].blobs;
340 fw_count = blobs_all[type].count;
341
342 if (!fw_count)
343 return true;
344
345 /* make sure the list is ordered as expected */
346 for (i = 1; i < fw_count; i++) {
347 /* Versionless file names must be unique per platform: */
348 for (j = i + 1; j < fw_count; j++) {
349 /* Same platform? */
350 if (fw_blobs[i].p != fw_blobs[j].p)
351 continue;
352
353 if (fw_blobs[i].blob.path != fw_blobs[j].blob.path)
354 continue;
355
356 drm_err(&i915->drm, "Duplicate %s blobs: %s r%u %s%d.%d.%d [%s] matches %s%d.%d.%d [%s]\n",
357 intel_uc_fw_type_repr(type),
358 intel_platform_name(fw_blobs[j].p), fw_blobs[j].rev,
359 fw_blobs[j].blob.legacy ? "L" : "v",
360 fw_blobs[j].blob.major, fw_blobs[j].blob.minor,
361 fw_blobs[j].blob.patch, fw_blobs[j].blob.path,
362 fw_blobs[i].blob.legacy ? "L" : "v",
363 fw_blobs[i].blob.major, fw_blobs[i].blob.minor,
364 fw_blobs[i].blob.patch, fw_blobs[i].blob.path);
365 }
366
367 /* Next platform is good: */
368 if (fw_blobs[i].p < fw_blobs[i - 1].p)
369 continue;
370
371 /* Next platform revision is good: */
372 if (fw_blobs[i].p == fw_blobs[i - 1].p &&
373 fw_blobs[i].rev < fw_blobs[i - 1].rev)
374 continue;
375
376 /* Platform/revision must be in order: */
377 if (fw_blobs[i].p != fw_blobs[i - 1].p ||
378 fw_blobs[i].rev != fw_blobs[i - 1].rev)
379 goto bad;
380
381 /* Next major version is good: */
382 if (fw_blobs[i].blob.major < fw_blobs[i - 1].blob.major)
383 continue;
384
385 /* New must be before legacy: */
386 if (!fw_blobs[i].blob.legacy && fw_blobs[i - 1].blob.legacy)
387 goto bad;
388
389 /* New to legacy also means 0.0 to X.Y (HuC), or X.0 to X.Y (GuC) */
390 if (fw_blobs[i].blob.legacy && !fw_blobs[i - 1].blob.legacy) {
391 if (!fw_blobs[i - 1].blob.major)
392 continue;
393
394 if (fw_blobs[i].blob.major == fw_blobs[i - 1].blob.major)
395 continue;
396 }
397
398 /* Major versions must be in order: */
399 if (fw_blobs[i].blob.major != fw_blobs[i - 1].blob.major)
400 goto bad;
401
402 /* Next minor version is good: */
403 if (fw_blobs[i].blob.minor < fw_blobs[i - 1].blob.minor)
404 continue;
405
406 /* Minor versions must be in order: */
407 if (fw_blobs[i].blob.minor != fw_blobs[i - 1].blob.minor)
408 goto bad;
409
410 /* Patch versions must be in order and unique: */
411 if (fw_blobs[i].blob.patch < fw_blobs[i - 1].blob.patch)
412 continue;
413
414 bad:
415 drm_err(&i915->drm, "Invalid %s blob order: %s r%u %s%d.%d.%d comes before %s r%u %s%d.%d.%d\n",
416 intel_uc_fw_type_repr(type),
417 intel_platform_name(fw_blobs[i - 1].p), fw_blobs[i - 1].rev,
418 fw_blobs[i - 1].blob.legacy ? "L" : "v",
419 fw_blobs[i - 1].blob.major,
420 fw_blobs[i - 1].blob.minor,
421 fw_blobs[i - 1].blob.patch,
422 intel_platform_name(fw_blobs[i].p), fw_blobs[i].rev,
423 fw_blobs[i].blob.legacy ? "L" : "v",
424 fw_blobs[i].blob.major,
425 fw_blobs[i].blob.minor,
426 fw_blobs[i].blob.patch);
427 return false;
428 }
429
430 return true;
431 }
432
__override_guc_firmware_path(struct drm_i915_private * i915)433 static const char *__override_guc_firmware_path(struct drm_i915_private *i915)
434 {
435 if (i915->params.enable_guc & ENABLE_GUC_MASK)
436 return i915->params.guc_firmware_path;
437 return "";
438 }
439
__override_huc_firmware_path(struct drm_i915_private * i915)440 static const char *__override_huc_firmware_path(struct drm_i915_private *i915)
441 {
442 if (i915->params.enable_guc & ENABLE_GUC_LOAD_HUC)
443 return i915->params.huc_firmware_path;
444 return "";
445 }
446
/* GSC has no enable modparam; the path override always applies as-is. */
static const char *__override_gsc_firmware_path(struct drm_i915_private *i915)
{
	return i915->params.gsc_firmware_path;
}
451
/*
 * Apply any modparam firmware-path override for this uC type. Any non-NULL
 * override - including an empty string, which later maps to the DISABLED
 * state in intel_uc_fw_init_early() - marks the firmware as user-overridden.
 */
static void __uc_fw_user_override(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
{
	const char *path = NULL;

	switch (uc_fw->type) {
	case INTEL_UC_FW_TYPE_GUC:
		path = __override_guc_firmware_path(i915);
		break;
	case INTEL_UC_FW_TYPE_HUC:
		path = __override_huc_firmware_path(i915);
		break;
	case INTEL_UC_FW_TYPE_GSC:
		path = __override_gsc_firmware_path(i915);
		break;
	}

	/* NULL presumably means the modparam was left unset - no override */
	if (unlikely(path)) {
		uc_fw->file_selected.path = path;
		uc_fw->user_overridden = true;
	}
}
473
/*
 * intel_uc_fw_version_from_gsc_manifest - read a FW version from a GSC manifest
 * @ver: version structure to fill in
 * @data: pointer to the start of the GSC manifest header
 *
 * The manifest calls the patch component "hotfix"; it maps to ver->patch.
 */
void intel_uc_fw_version_from_gsc_manifest(struct intel_uc_fw_ver *ver,
					   const void *data)
{
	const struct intel_gsc_manifest_header *manifest = data;

	ver->major = manifest->fw_version.major;
	ver->minor = manifest->fw_version.minor;
	ver->patch = manifest->fw_version.hotfix;
	ver->build = manifest->fw_version.build;
}
484
485 /**
486 * intel_uc_fw_init_early - initialize the uC object and select the firmware
487 * @uc_fw: uC firmware
488 * @type: type of uC
489 * @needs_ggtt_mapping: whether the FW needs to be GGTT mapped for loading
490 *
491 * Initialize the state of our uC object and relevant tracking and select the
492 * firmware to fetch and load.
493 */
intel_uc_fw_init_early(struct intel_uc_fw * uc_fw,enum intel_uc_fw_type type,bool needs_ggtt_mapping)494 void intel_uc_fw_init_early(struct intel_uc_fw *uc_fw,
495 enum intel_uc_fw_type type,
496 bool needs_ggtt_mapping)
497 {
498 struct intel_gt *gt = ____uc_fw_to_gt(uc_fw, type);
499 struct drm_i915_private *i915 = gt->i915;
500
501 /*
502 * we use FIRMWARE_UNINITIALIZED to detect checks against uc_fw->status
503 * before we're looked at the HW caps to see if we have uc support
504 */
505 BUILD_BUG_ON(INTEL_UC_FIRMWARE_UNINITIALIZED);
506 GEM_BUG_ON(uc_fw->status);
507 GEM_BUG_ON(uc_fw->file_selected.path);
508
509 uc_fw->type = type;
510 uc_fw->needs_ggtt_mapping = needs_ggtt_mapping;
511
512 if (HAS_GT_UC(i915)) {
513 if (!validate_fw_table_type(i915, type)) {
514 gt->uc.fw_table_invalid = true;
515 intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_NOT_SUPPORTED);
516 return;
517 }
518
519 __uc_fw_auto_select(i915, uc_fw);
520 __uc_fw_user_override(i915, uc_fw);
521 }
522
523 intel_uc_fw_change_status(uc_fw, uc_fw->file_selected.path ? *uc_fw->file_selected.path ?
524 INTEL_UC_FIRMWARE_SELECTED :
525 INTEL_UC_FIRMWARE_DISABLED :
526 INTEL_UC_FIRMWARE_NOT_SUPPORTED);
527 }
528
/*
 * Selftest support: simulate various firmware-fetch failure modes via the
 * i915 probe-error injection mechanism. Although several branch conditions
 * below look identical, each i915_inject_probe_error() call is a distinct
 * injection point, so a given injection setting makes at most one branch
 * fire per call - TODO confirm against i915_inject_probe_error().
 *
 * e == -EINVAL is used to simulate user-override failures; other codes
 * (the caller also passes -ESTALE) simulate driver-selection failures.
 */
static void __force_fw_fetch_failures(struct intel_uc_fw *uc_fw, int e)
{
	struct drm_i915_private *i915 = __uc_fw_to_gt(uc_fw)->i915;
	bool user = e == -EINVAL;

	if (i915_inject_probe_error(i915, e)) {
		/* non-existing blob */
		uc_fw->file_selected.path = "<invalid>";
		uc_fw->user_overridden = user;
	} else if (i915_inject_probe_error(i915, e)) {
		/* require next major version */
		uc_fw->file_wanted.ver.major += 1;
		uc_fw->file_wanted.ver.minor = 0;
		uc_fw->user_overridden = user;
	} else if (i915_inject_probe_error(i915, e)) {
		/* require next minor version */
		uc_fw->file_wanted.ver.minor += 1;
		uc_fw->user_overridden = user;
	} else if (uc_fw->file_wanted.ver.major &&
		   i915_inject_probe_error(i915, e)) {
		/* require prev major version */
		uc_fw->file_wanted.ver.major -= 1;
		uc_fw->file_wanted.ver.minor = 0;
		uc_fw->user_overridden = user;
	} else if (uc_fw->file_wanted.ver.minor &&
		   i915_inject_probe_error(i915, e)) {
		/* require prev minor version - hey, this should work! */
		uc_fw->file_wanted.ver.minor -= 1;
		uc_fw->user_overridden = user;
	} else if (user && i915_inject_probe_error(i915, e)) {
		/* officially unsupported platform */
		uc_fw->file_wanted.ver.major = 0;
		uc_fw->file_wanted.ver.minor = 0;
		uc_fw->user_overridden = true;
	}
}
565
/* Unpack a major.minor.patch version triple from a packed CSS header dword. */
static void uc_unpack_css_version(struct intel_uc_fw_ver *ver, u32 css_value)
{
	u32 major = FIELD_GET(CSS_SW_VERSION_UC_MAJOR, css_value);
	u32 minor = FIELD_GET(CSS_SW_VERSION_UC_MINOR, css_value);
	u32 patch = FIELD_GET(CSS_SW_VERSION_UC_PATCH, css_value);

	ver->major = major;
	ver->minor = minor;
	ver->patch = patch;
}
573
/*
 * Extract GuC-specific data from the CSS header: the submission API version
 * (read from the header where available, otherwise derived from the file
 * version) and the size of the GuC private data area.
 */
static void guc_read_css_info(struct intel_uc_fw *uc_fw, struct uc_css_header *css)
{
	struct intel_guc *guc = container_of(uc_fw, struct intel_guc, fw);

	/*
	 * The GuC firmware includes an extra version number to specify the
	 * submission API level. This allows submission code to work with
	 * multiple GuC versions without having to know the absolute firmware
	 * version number (there are likely to be multiple firmware releases
	 * which all support the same submission API level).
	 *
	 * Note that the spec for the CSS header defines this version number
	 * as 'vf_version' as it was originally intended for virtualisation.
	 * However, it is applicable to native submission as well.
	 *
	 * Unfortunately, due to an oversight, this version number was only
	 * exposed in the CSS header from v70.6.0.
	 */
	if (uc_fw->file_selected.ver.major >= 70) {
		if (uc_fw->file_selected.ver.minor >= 6) {
			/* v70.6.0 adds CSS header support */
			uc_unpack_css_version(&guc->submission_version, css->vf_version);
		} else if (uc_fw->file_selected.ver.minor >= 3) {
			/* v70.3.0 introduced v1.1.0 */
			guc->submission_version.major = 1;
			guc->submission_version.minor = 1;
			guc->submission_version.patch = 0;
		} else {
			/* v70.0.0 introduced v1.0.0 */
			guc->submission_version.major = 1;
			guc->submission_version.minor = 0;
			guc->submission_version.patch = 0;
		}
	} else if (uc_fw->file_selected.ver.major >= 69) {
		/* v69.0.0 introduced v0.10.0 */
		guc->submission_version.major = 0;
		guc->submission_version.minor = 10;
		guc->submission_version.patch = 0;
	} else {
		/* Prior versions were v0.1.0 */
		guc->submission_version.major = 0;
		guc->submission_version.minor = 1;
		guc->submission_version.patch = 0;
	}

	/* scratch area the GuC requires beyond the binary itself */
	uc_fw->private_data_size = css->private_data_size;
}
621
/*
 * Parse and validate the CSS (Code Signing Service) header at the start of
 * @fw_data, filling in the uC size/version fields on success.
 *
 * Fix vs. previous revision: the "unexpected header size" warning printed
 * @fw_size even though the comparison was made against the header size
 * computed from the CSS fields; it now reports the computed size. The WOPCM
 * message also said '>' for what is a '>=' check.
 *
 * Returns 0 on success or a negative error code:
 *  -ENODATA: blob too small to contain a CSS header
 *  -EPROTO:  size fields inside the CSS header are inconsistent
 *  -ENOEXEC: blob too small for header + uCode + RSA signature
 *  -E2BIG:   firmware would not fit in WOPCM
 */
static int __check_ccs_header(struct intel_gt *gt,
			      const void *fw_data, size_t fw_size,
			      struct intel_uc_fw *uc_fw)
{
	struct uc_css_header *css;
	size_t size;

	/* Check the size of the blob before examining buffer contents */
	if (unlikely(fw_size < sizeof(struct uc_css_header))) {
		gt_warn(gt, "%s firmware %s: invalid size: %zu < %zu\n",
			intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
			fw_size, sizeof(struct uc_css_header));
		return -ENODATA;
	}

	css = (struct uc_css_header *)fw_data;

	/* Check integrity of size values inside CSS header */
	size = (css->header_size_dw - css->key_size_dw - css->modulus_size_dw -
		css->exponent_size_dw) * sizeof(u32);
	if (unlikely(size != sizeof(struct uc_css_header))) {
		/* report the computed header size, not the whole blob size */
		gt_warn(gt, "%s firmware %s: unexpected header size: %zu != %zu\n",
			intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
			size, sizeof(struct uc_css_header));
		return -EPROTO;
	}

	/* uCode size must be calculated from other sizes */
	uc_fw->ucode_size = (css->size_dw - css->header_size_dw) * sizeof(u32);

	/* now RSA */
	uc_fw->rsa_size = css->key_size_dw * sizeof(u32);

	/* At least, it should have header, uCode and RSA. Size of all three. */
	size = sizeof(struct uc_css_header) + uc_fw->ucode_size + uc_fw->rsa_size;
	if (unlikely(fw_size < size)) {
		gt_warn(gt, "%s firmware %s: invalid size: %zu < %zu\n",
			intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
			fw_size, size);
		return -ENOEXEC;
	}

	/* Sanity check whether this fw is not larger than whole WOPCM memory */
	size = __intel_uc_fw_get_upload_size(uc_fw);
	if (unlikely(size >= gt->wopcm.size)) {
		gt_warn(gt, "%s firmware %s: invalid size: %zu >= %zu\n",
			intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
			size, (size_t)gt->wopcm.size);
		return -E2BIG;
	}

	/* Get version numbers from the CSS header */
	uc_unpack_css_version(&uc_fw->file_selected.ver, css->sw_version);

	if (uc_fw->type == INTEL_UC_FW_TYPE_GUC)
		guc_read_css_info(uc_fw, css);

	return 0;
}
680
/*
 * Parse the GSC-style binary headers wrapping HuC/GSC blobs and, when the
 * binary also embeds a legacy CSS section (dma_start_offset != 0), validate
 * that section too.
 *
 * NOTE(review): the result of __check_ccs_header() is ignored here - confirm
 * whether a CSS validation failure should be propagated to the caller.
 */
static int check_gsc_manifest(struct intel_gt *gt,
			      const struct firmware *fw,
			      struct intel_uc_fw *uc_fw)
{
	switch (uc_fw->type) {
	case INTEL_UC_FW_TYPE_HUC:
		intel_huc_fw_get_binary_info(uc_fw, fw->data, fw->size);
		break;
	case INTEL_UC_FW_TYPE_GSC:
		intel_gsc_fw_get_binary_info(uc_fw, fw->data, fw->size);
		break;
	default:
		/* GuC blobs never carry GSC headers */
		MISSING_CASE(uc_fw->type);
		return -EINVAL;
	}

	if (uc_fw->dma_start_offset) {
		/* the CSS section starts at the DMA offset inside the blob */
		u32 delta = uc_fw->dma_start_offset;

		__check_ccs_header(gt, fw->data + delta, fw->size - delta, uc_fw);
	}

	return 0;
}
705
/* Validate a bare CSS-header blob (no GSC wrapping, CSS at offset 0). */
static int check_ccs_header(struct intel_gt *gt,
			    const struct firmware *fw,
			    struct intel_uc_fw *uc_fw)
{
	return __check_ccs_header(gt, fw->data, fw->size, uc_fw);
}
712
is_ver_8bit(struct intel_uc_fw_ver * ver)713 static bool is_ver_8bit(struct intel_uc_fw_ver *ver)
714 {
715 return ver->major < 0xFF && ver->minor < 0xFF && ver->patch < 0xFF;
716 }
717
/*
 * Enforce that both the GuC file version and the submission API version fit
 * in 8 bits per component, as the submission version-comparison code
 * assumes. Returns 0 on success, -EINVAL on an out-of-range version; the
 * final return is a selftest error-injection point (0 in normal operation).
 */
static int guc_check_version_range(struct intel_uc_fw *uc_fw)
{
	struct intel_guc *guc = container_of(uc_fw, struct intel_guc, fw);
	struct intel_gt *gt = __uc_fw_to_gt(uc_fw);

	/*
	 * GuC version number components are defined as being 8-bits.
	 * The submission code relies on this to optimise version comparison
	 * tests. So enforce the restriction here.
	 */

	if (!is_ver_8bit(&uc_fw->file_selected.ver)) {
		gt_warn(gt, "%s firmware: invalid file version: 0x%02X:%02X:%02X\n",
			intel_uc_fw_type_repr(uc_fw->type),
			uc_fw->file_selected.ver.major,
			uc_fw->file_selected.ver.minor,
			uc_fw->file_selected.ver.patch);
		return -EINVAL;
	}

	if (!is_ver_8bit(&guc->submission_version)) {
		gt_warn(gt, "%s firmware: invalid submit version: 0x%02X:%02X:%02X\n",
			intel_uc_fw_type_repr(uc_fw->type),
			guc->submission_version.major,
			guc->submission_version.minor,
			guc->submission_version.patch);
		return -EINVAL;
	}

	return i915_inject_probe_error(gt->i915, -EINVAL);
}
749
check_fw_header(struct intel_gt * gt,const struct firmware * fw,struct intel_uc_fw * uc_fw)750 static int check_fw_header(struct intel_gt *gt,
751 const struct firmware *fw,
752 struct intel_uc_fw *uc_fw)
753 {
754 int err = 0;
755
756 if (uc_fw->has_gsc_headers)
757 err = check_gsc_manifest(gt, fw, uc_fw);
758 else
759 err = check_ccs_header(gt, fw, uc_fw);
760 if (err)
761 return err;
762
763 return 0;
764 }
765
/*
 * Request the currently selected blob from the firmware loader. A blob that
 * exceeds the GGTT area reserved per firmware is released again and reported
 * as -ENOENT so the caller's fallback loop moves on to the next (older) blob.
 */
static int try_firmware_load(struct intel_uc_fw *uc_fw, const struct firmware **fw)
{
	struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
	struct device *dev = gt->i915->drm.dev;
	int err;

	err = firmware_request_nowarn(fw, uc_fw->file_selected.path, dev);

	if (err)
		return err;

	/* firmware that must be GGTT-mapped has a fixed-size reserved range */
	if (uc_fw->needs_ggtt_mapping && (*fw)->size > INTEL_UC_RSVD_GGTT_PER_FW) {
		gt_err(gt, "%s firmware %s: size (%zuKB) exceeds max supported size (%uKB)\n",
		       intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
		       (*fw)->size / SZ_1K, INTEL_UC_RSVD_GGTT_PER_FW / SZ_1K);

		/* try to find another blob to load */
		release_firmware(*fw);
		*fw = NULL;
		return -ENOENT;
	}

	return 0;
}
790
/*
 * MTL-specific cross-check of the fetched HuC and GuC versions: the HuC
 * authentication flow changed with HuC 8.5.1, which requires GuC 70.7.0 or
 * newer, so a new blob of one cannot be mixed with an old blob of the other.
 */
static int check_mtl_huc_guc_compatibility(struct intel_gt *gt,
					   struct intel_uc_fw_file *huc_selected)
{
	struct intel_uc_fw_file *guc_selected = &gt->uc.guc.fw.file_selected;
	struct intel_uc_fw_ver *huc_ver = &huc_selected->ver;
	struct intel_uc_fw_ver *guc_ver = &guc_selected->ver;
	bool new_huc, new_guc;

	/* we can only do this check after having fetched both GuC and HuC */
	GEM_BUG_ON(!huc_selected->path || !guc_selected->path);

	/*
	 * Due to changes in the authentication flow for MTL, HuC 8.5.1 or newer
	 * requires GuC 70.7.0 or newer. Older HuC binaries will instead require
	 * GuC < 70.7.0.
	 */
	new_huc = huc_ver->major > 8 ||
		  (huc_ver->major == 8 && huc_ver->minor > 5) ||
		  (huc_ver->major == 8 && huc_ver->minor == 5 && huc_ver->patch >= 1);

	new_guc = guc_ver->major > 70 ||
		  (guc_ver->major == 70 && guc_ver->minor >= 7);

	/* both sides must be on the same side of the cutoff */
	if (new_huc != new_guc) {
		UNEXPECTED(gt, "HuC %u.%u.%u is incompatible with GuC %u.%u.%u\n",
			   huc_ver->major, huc_ver->minor, huc_ver->patch,
			   guc_ver->major, guc_ver->minor, guc_ver->patch);
		gt_info(gt, "MTL GuC 70.7.0+ and HuC 8.5.1+ don't work with older releases\n");
		return -ENOEXEC;
	}

	return 0;
}
824
/*
 * intel_uc_check_file_version - compare fetched firmware against expectations
 * @uc_fw: uC firmware
 * @old_ver: optional; set to true if the file is older than the wanted version
 *
 * Returns 0 on success, or a negative error code when the firmware is not
 * usable: a major-version mismatch without a user override, or an MTL
 * HuC/GuC compatibility violation.
 */
int intel_uc_check_file_version(struct intel_uc_fw *uc_fw, bool *old_ver)
{
	struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
	struct intel_uc_fw_file *wanted = &uc_fw->file_wanted;
	struct intel_uc_fw_file *selected = &uc_fw->file_selected;
	int ret;

	/*
	 * MTL has some compatibility issues with early GuC/HuC binaries
	 * not working with newer ones. This is specific to MTL and we
	 * don't expect it to extend to other platforms.
	 */
	if (IS_METEORLAKE(gt->i915) && uc_fw->type == INTEL_UC_FW_TYPE_HUC) {
		ret = check_mtl_huc_guc_compatibility(gt, selected);
		if (ret)
			return ret;
	}

	/* a zero major version means no version requirement to enforce */
	if (!wanted->ver.major || !selected->ver.major)
		return 0;

	/* Check the file's major version was as it claimed */
	if (selected->ver.major != wanted->ver.major) {
		UNEXPECTED(gt, "%s firmware %s: unexpected version: %u.%u != %u.%u\n",
			   intel_uc_fw_type_repr(uc_fw->type), selected->path,
			   selected->ver.major, selected->ver.minor,
			   wanted->ver.major, wanted->ver.minor);
		/* a user-forced file is allowed through despite the mismatch */
		if (!intel_uc_fw_is_overridden(uc_fw))
			return -ENOEXEC;
	} else if (old_ver) {
		if (selected->ver.minor < wanted->ver.minor)
			*old_ver = true;
		else if ((selected->ver.minor == wanted->ver.minor) &&
			 (selected->ver.patch < wanted->ver.patch))
			*old_ver = true;
	}

	return 0;
}
864
/**
 * intel_uc_fw_fetch - fetch uC firmware
 * @uc_fw: uC firmware
 *
 * Fetch uC firmware into GEM obj.
 *
 * Return: 0 on success, a negative errno code on failure.
 */
int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
{
	struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
	struct drm_i915_private *i915 = gt->i915;
	struct intel_uc_fw_file file_ideal;
	struct drm_i915_gem_object *obj;
	const struct firmware *fw = NULL;
	bool old_ver = false;
	int err;

	GEM_BUG_ON(!gt->wopcm.size);
	GEM_BUG_ON(!intel_uc_fw_is_enabled(uc_fw));

	/* Fault-injection hook for probe-error testing */
	err = i915_inject_probe_error(i915, -ENXIO);
	if (err)
		goto fail;

	__force_fw_fetch_failures(uc_fw, -EINVAL);
	__force_fw_fetch_failures(uc_fw, -ESTALE);

	err = try_firmware_load(uc_fw, &fw);
	/* Snapshot the originally wanted file before any fallback overwrites it */
	memcpy(&file_ideal, &uc_fw->file_wanted, sizeof(file_ideal));

	/* Any error is terminal if overriding. Don't bother searching for older versions */
	if (err && intel_uc_fw_is_overridden(uc_fw))
		goto fail;

	/* Walk the list of older fallback blobs until one loads or we run out */
	while (err == -ENOENT) {
		old_ver = true;

		__uc_fw_auto_select(i915, uc_fw);
		if (!uc_fw->file_selected.path) {
			/*
			 * No more options! But set the path back to something
			 * valid just in case it gets dereferenced.
			 */
			uc_fw->file_selected.path = file_ideal.path;

			/* Also, preserve the version that was really wanted */
			memcpy(&uc_fw->file_wanted, &file_ideal, sizeof(uc_fw->file_wanted));
			break;
		}

		err = try_firmware_load(uc_fw, &fw);
	}

	if (err)
		goto fail;

	err = check_fw_header(gt, fw, uc_fw);
	if (err)
		goto fail;

	/* GuC carries extra version info that needs range validation */
	if (uc_fw->type == INTEL_UC_FW_TYPE_GUC) {
		err = guc_check_version_range(uc_fw);
		if (err)
			goto fail;
	}

	err = intel_uc_check_file_version(uc_fw, &old_ver);
	if (err)
		goto fail;

	/* Nag (once per fetch) if we had to settle for an older blob */
	if (old_ver && uc_fw->file_selected.ver.major) {
		/* Preserve the version that was really wanted */
		memcpy(&uc_fw->file_wanted, &file_ideal, sizeof(uc_fw->file_wanted));

		UNEXPECTED(gt, "%s firmware %s (%d.%d.%d) is recommended, but only %s (%d.%d.%d) was found\n",
			   intel_uc_fw_type_repr(uc_fw->type),
			   uc_fw->file_wanted.path,
			   uc_fw->file_wanted.ver.major,
			   uc_fw->file_wanted.ver.minor,
			   uc_fw->file_wanted.ver.patch,
			   uc_fw->file_selected.path,
			   uc_fw->file_selected.ver.major,
			   uc_fw->file_selected.ver.minor,
			   uc_fw->file_selected.ver.patch);
		gt_info(gt, "Consider updating your linux-firmware pkg or downloading from %s\n",
			INTEL_UC_FIRMWARE_URL);
	}

	/* Stage the blob in device-local memory when available, else in shmem */
	if (HAS_LMEM(i915)) {
		obj = i915_gem_object_create_lmem_from_data(i915, fw->data, fw->size);
		if (!IS_ERR(obj))
			obj->flags |= I915_BO_ALLOC_PM_EARLY;
	} else {
		obj = i915_gem_object_create_shmem_from_data(i915, fw->data, fw->size);
	}

	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto fail;
	}

	uc_fw->obj = obj;
	uc_fw->size = fw->size;
	intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_AVAILABLE);

	/* The blob now lives in the GEM object; the fw request can go */
	release_firmware(fw);
	return 0;

fail:
	intel_uc_fw_change_status(uc_fw, err == -ENOENT ?
				  INTEL_UC_FIRMWARE_MISSING :
				  INTEL_UC_FIRMWARE_ERROR);

	gt_probe_error(gt, "%s firmware %s: fetch failed %pe\n",
		       intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path, ERR_PTR(err));
	gt_info(gt, "%s firmware(s) can be downloaded from %s\n",
		intel_uc_fw_type_repr(uc_fw->type), INTEL_UC_FIRMWARE_URL);

	release_firmware(fw); /* OK even if fw is NULL */
	return err;
}
987
uc_fw_ggtt_offset(struct intel_uc_fw * uc_fw)988 static u32 uc_fw_ggtt_offset(struct intel_uc_fw *uc_fw)
989 {
990 struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
991 struct i915_ggtt *ggtt = gt->ggtt;
992 struct drm_mm_node *node = &ggtt->uc_fw;
993 u32 offset = uc_fw->type * INTEL_UC_RSVD_GGTT_PER_FW;
994
995 /*
996 * The media GT shares the GGTT with the root GT, which means that
997 * we need to use different offsets for the binaries on the media GT.
998 * To keep the math simple, we use 8MB for the root tile and 8MB for
999 * the media one. This will need to be updated if we ever have more
1000 * than 1 media GT.
1001 */
1002 BUILD_BUG_ON(INTEL_UC_FW_NUM_TYPES * INTEL_UC_RSVD_GGTT_PER_FW > SZ_8M);
1003 GEM_BUG_ON(gt->type == GT_MEDIA && gt->info.id > 1);
1004 if (gt->type == GT_MEDIA)
1005 offset += SZ_8M;
1006
1007 GEM_BUG_ON(!drm_mm_node_allocated(node));
1008 GEM_BUG_ON(upper_32_bits(node->start));
1009 GEM_BUG_ON(upper_32_bits(node->start + node->size - 1));
1010 GEM_BUG_ON(offset + uc_fw->obj->base.size > node->size);
1011 GEM_BUG_ON(uc_fw->obj->base.size > INTEL_UC_RSVD_GGTT_PER_FW);
1012
1013 return lower_32_bits(node->start + offset);
1014 }
1015
uc_fw_bind_ggtt(struct intel_uc_fw * uc_fw)1016 static void uc_fw_bind_ggtt(struct intel_uc_fw *uc_fw)
1017 {
1018 struct drm_i915_gem_object *obj = uc_fw->obj;
1019 struct i915_ggtt *ggtt = __uc_fw_to_gt(uc_fw)->ggtt;
1020 struct i915_vma_resource *vma_res = &uc_fw->vma_res;
1021 u32 pte_flags = 0;
1022
1023 if (!uc_fw->needs_ggtt_mapping)
1024 return;
1025
1026 vma_res->start = uc_fw_ggtt_offset(uc_fw);
1027 vma_res->node_size = obj->base.size;
1028 vma_res->bi.pages = obj->mm.pages;
1029
1030 GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
1031
1032 /* uc_fw->obj cache domains were not controlled across suspend */
1033 if (i915_gem_object_has_struct_page(obj))
1034 drm_clflush_sg(vma_res->bi.pages);
1035
1036 if (i915_gem_object_is_lmem(obj))
1037 pte_flags |= PTE_LM;
1038
1039 if (ggtt->vm.raw_insert_entries)
1040 ggtt->vm.raw_insert_entries(&ggtt->vm, vma_res,
1041 i915_gem_get_pat_index(ggtt->vm.i915,
1042 I915_CACHE_NONE),
1043 pte_flags);
1044 else
1045 ggtt->vm.insert_entries(&ggtt->vm, vma_res,
1046 i915_gem_get_pat_index(ggtt->vm.i915,
1047 I915_CACHE_NONE),
1048 pte_flags);
1049 }
1050
uc_fw_unbind_ggtt(struct intel_uc_fw * uc_fw)1051 static void uc_fw_unbind_ggtt(struct intel_uc_fw *uc_fw)
1052 {
1053 struct i915_ggtt *ggtt = __uc_fw_to_gt(uc_fw)->ggtt;
1054 struct i915_vma_resource *vma_res = &uc_fw->vma_res;
1055
1056 if (!vma_res->node_size)
1057 return;
1058
1059 ggtt->vm.clear_range(&ggtt->vm, vma_res->start, vma_res->node_size);
1060 }
1061
/*
 * DMA-copy the CSS header + uCode payload of @uc_fw from its GGTT-mapped
 * source into WOPCM at @dst_offset using the firmware-load DMA engine.
 * Returns 0 on success or a negative errno on failure.
 */
static int uc_fw_xfer(struct intel_uc_fw *uc_fw, u32 dst_offset, u32 dma_flags)
{
	struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
	struct intel_uncore *uncore = gt->uncore;
	u64 offset;
	int ret;

	/* Fault-injection hook for probe-error testing */
	ret = i915_inject_probe_error(gt->i915, -ETIMEDOUT);
	if (ret)
		return ret;

	/* Hold forcewake across the whole raw (_fw) register sequence */
	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);

	/* Set the source address for the uCode */
	offset = uc_fw->vma_res.start + uc_fw->dma_start_offset;
	/* source address must fit in 48 bits, i.e. bits 48-63 clear */
	GEM_BUG_ON(upper_32_bits(offset) & 0xFFFF0000);
	intel_uncore_write_fw(uncore, DMA_ADDR_0_LOW, lower_32_bits(offset));
	intel_uncore_write_fw(uncore, DMA_ADDR_0_HIGH, upper_32_bits(offset));

	/* Set the DMA destination */
	intel_uncore_write_fw(uncore, DMA_ADDR_1_LOW, dst_offset);
	intel_uncore_write_fw(uncore, DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);

	/*
	 * Set the transfer size. The header plus uCode will be copied to WOPCM
	 * via DMA, excluding any other components
	 */
	intel_uncore_write_fw(uncore, DMA_COPY_SIZE,
			      sizeof(struct uc_css_header) + uc_fw->ucode_size);

	/* Start the DMA */
	intel_uncore_write_fw(uncore, DMA_CTRL,
			      _MASKED_BIT_ENABLE(dma_flags | START_DMA));

	/* Wait for DMA to finish: HW clears START_DMA on completion */
	ret = intel_wait_for_register_fw(uncore, DMA_CTRL, START_DMA, 0, 100);
	if (ret)
		gt_err(gt, "DMA for %s fw failed, DMA_CTRL=%u\n",
		       intel_uc_fw_type_repr(uc_fw->type),
		       intel_uncore_read_fw(uncore, DMA_CTRL));

	/* Disable the bits once DMA is over */
	intel_uncore_write_fw(uncore, DMA_CTRL, _MASKED_BIT_DISABLE(dma_flags));

	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);

	return ret;
}
1110
intel_uc_fw_mark_load_failed(struct intel_uc_fw * uc_fw,int err)1111 int intel_uc_fw_mark_load_failed(struct intel_uc_fw *uc_fw, int err)
1112 {
1113 struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
1114
1115 GEM_BUG_ON(!intel_uc_fw_is_loadable(uc_fw));
1116
1117 gt_probe_error(gt, "Failed to load %s firmware %s %pe\n",
1118 intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path, ERR_PTR(err));
1119 intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_LOAD_FAIL);
1120
1121 return err;
1122 }
1123
1124 /**
1125 * intel_uc_fw_upload - load uC firmware using custom loader
1126 * @uc_fw: uC firmware
1127 * @dst_offset: destination offset
1128 * @dma_flags: flags for flags for dma ctrl
1129 *
1130 * Loads uC firmware and updates internal flags.
1131 *
1132 * Return: 0 on success, non-zero on failure.
1133 */
intel_uc_fw_upload(struct intel_uc_fw * uc_fw,u32 dst_offset,u32 dma_flags)1134 int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, u32 dst_offset, u32 dma_flags)
1135 {
1136 struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
1137 int err;
1138
1139 /* make sure the status was cleared the last time we reset the uc */
1140 GEM_BUG_ON(intel_uc_fw_is_loaded(uc_fw));
1141
1142 err = i915_inject_probe_error(gt->i915, -ENOEXEC);
1143 if (err)
1144 return err;
1145
1146 if (!intel_uc_fw_is_loadable(uc_fw))
1147 return -ENOEXEC;
1148
1149 /* Call custom loader */
1150 err = uc_fw_xfer(uc_fw, dst_offset, dma_flags);
1151 if (err)
1152 goto fail;
1153
1154 intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_TRANSFERRED);
1155 return 0;
1156
1157 fail:
1158 return intel_uc_fw_mark_load_failed(uc_fw, err);
1159 }
1160
uc_fw_need_rsa_in_memory(struct intel_uc_fw * uc_fw)1161 static inline bool uc_fw_need_rsa_in_memory(struct intel_uc_fw *uc_fw)
1162 {
1163 /*
1164 * The HW reads the GuC RSA from memory if the key size is > 256 bytes,
1165 * while it reads it from the 64 RSA registers if it is smaller.
1166 * The HuC RSA is always read from memory.
1167 */
1168 return uc_fw->type == INTEL_UC_FW_TYPE_HUC || uc_fw->rsa_size > 256;
1169 }
1170
uc_fw_rsa_data_create(struct intel_uc_fw * uc_fw)1171 static int uc_fw_rsa_data_create(struct intel_uc_fw *uc_fw)
1172 {
1173 struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
1174 struct i915_vma *vma;
1175 size_t copied;
1176 void *vaddr;
1177 int err;
1178
1179 err = i915_inject_probe_error(gt->i915, -ENXIO);
1180 if (err)
1181 return err;
1182
1183 if (!uc_fw_need_rsa_in_memory(uc_fw))
1184 return 0;
1185
1186 /*
1187 * uC firmwares will sit above GUC_GGTT_TOP and will not map through
1188 * GGTT. Unfortunately, this means that the GuC HW cannot perform the uC
1189 * authentication from memory, as the RSA offset now falls within the
1190 * GuC inaccessible range. We resort to perma-pinning an additional vma
1191 * within the accessible range that only contains the RSA signature.
1192 * The GuC HW can use this extra pinning to perform the authentication
1193 * since its GGTT offset will be GuC accessible.
1194 */
1195 GEM_BUG_ON(uc_fw->rsa_size > PAGE_SIZE);
1196 vma = intel_guc_allocate_vma(>->uc.guc, PAGE_SIZE);
1197 if (IS_ERR(vma))
1198 return PTR_ERR(vma);
1199
1200 vaddr = i915_gem_object_pin_map_unlocked(vma->obj,
1201 intel_gt_coherent_map_type(gt, vma->obj, true));
1202 if (IS_ERR(vaddr)) {
1203 i915_vma_unpin_and_release(&vma, 0);
1204 err = PTR_ERR(vaddr);
1205 goto unpin_out;
1206 }
1207
1208 copied = intel_uc_fw_copy_rsa(uc_fw, vaddr, vma->size);
1209 i915_gem_object_unpin_map(vma->obj);
1210
1211 if (copied < uc_fw->rsa_size) {
1212 err = -ENOMEM;
1213 goto unpin_out;
1214 }
1215
1216 uc_fw->rsa_data = vma;
1217
1218 return 0;
1219
1220 unpin_out:
1221 i915_vma_unpin_and_release(&vma, 0);
1222 return err;
1223 }
1224
/* Release the perma-pinned RSA vma created by uc_fw_rsa_data_create() */
static void uc_fw_rsa_data_destroy(struct intel_uc_fw *uc_fw)
{
	i915_vma_unpin_and_release(&uc_fw->rsa_data, 0);
}
1229
intel_uc_fw_init(struct intel_uc_fw * uc_fw)1230 int intel_uc_fw_init(struct intel_uc_fw *uc_fw)
1231 {
1232 int err;
1233
1234 /* this should happen before the load! */
1235 GEM_BUG_ON(intel_uc_fw_is_loaded(uc_fw));
1236
1237 if (!intel_uc_fw_is_available(uc_fw))
1238 return -ENOEXEC;
1239
1240 err = i915_gem_object_pin_pages_unlocked(uc_fw->obj);
1241 if (err) {
1242 gt_dbg(__uc_fw_to_gt(uc_fw), "%s fw pin-pages failed %pe\n",
1243 intel_uc_fw_type_repr(uc_fw->type), ERR_PTR(err));
1244 goto out;
1245 }
1246
1247 err = uc_fw_rsa_data_create(uc_fw);
1248 if (err) {
1249 gt_dbg(__uc_fw_to_gt(uc_fw), "%s fw rsa data creation failed %pe\n",
1250 intel_uc_fw_type_repr(uc_fw->type), ERR_PTR(err));
1251 goto out_unpin;
1252 }
1253
1254 uc_fw_bind_ggtt(uc_fw);
1255
1256 return 0;
1257
1258 out_unpin:
1259 i915_gem_object_unpin_pages(uc_fw->obj);
1260 out:
1261 return err;
1262 }
1263
intel_uc_fw_fini(struct intel_uc_fw * uc_fw)1264 void intel_uc_fw_fini(struct intel_uc_fw *uc_fw)
1265 {
1266 uc_fw_unbind_ggtt(uc_fw);
1267 uc_fw_rsa_data_destroy(uc_fw);
1268
1269 if (i915_gem_object_has_pinned_pages(uc_fw->obj))
1270 i915_gem_object_unpin_pages(uc_fw->obj);
1271
1272 intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_AVAILABLE);
1273 }
1274
intel_uc_fw_resume_mapping(struct intel_uc_fw * uc_fw)1275 void intel_uc_fw_resume_mapping(struct intel_uc_fw *uc_fw)
1276 {
1277 if (!intel_uc_fw_is_available(uc_fw))
1278 return;
1279
1280 if (!i915_gem_object_has_pinned_pages(uc_fw->obj))
1281 return;
1282
1283 uc_fw_bind_ggtt(uc_fw);
1284 }
1285
1286 /**
1287 * intel_uc_fw_cleanup_fetch - cleanup uC firmware
1288 * @uc_fw: uC firmware
1289 *
1290 * Cleans up uC firmware by releasing the firmware GEM obj.
1291 */
intel_uc_fw_cleanup_fetch(struct intel_uc_fw * uc_fw)1292 void intel_uc_fw_cleanup_fetch(struct intel_uc_fw *uc_fw)
1293 {
1294 if (!intel_uc_fw_is_available(uc_fw))
1295 return;
1296
1297 i915_gem_object_put(fetch_and_zero(&uc_fw->obj));
1298
1299 intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_SELECTED);
1300 }
1301
/**
 * intel_uc_fw_copy_rsa - copy fw RSA to buffer
 *
 * @uc_fw: uC firmware
 * @dst: dst buffer
 * @max_len: max number of bytes to copy
 *
 * Copies the RSA signature blob - located in the firmware image right after
 * the CSS header and the uCode payload - into @dst, at most @max_len bytes.
 * Walks the backing store page by page, handling both struct-page (shmem)
 * and io-mapped (lmem) objects.
 *
 * Return: number of copied bytes.
 */
size_t intel_uc_fw_copy_rsa(struct intel_uc_fw *uc_fw, void *dst, u32 max_len)
{
	struct intel_memory_region *mr = uc_fw->obj->mm.region;
	u32 size = min_t(u32, uc_fw->rsa_size, max_len);
	/* Byte offset of the RSA blob within the firmware image */
	u32 offset = uc_fw->dma_start_offset + sizeof(struct uc_css_header) + uc_fw->ucode_size;
	struct sgt_iter iter;
	size_t count = 0;
	int idx;

	/* Called during reset handling, must be atomic [no fs_reclaim] */
	GEM_BUG_ON(!intel_uc_fw_is_available(uc_fw));

	/* Split the byte offset into a page index and an in-page offset */
	idx = offset >> PAGE_SHIFT;
	offset = offset_in_page(offset);
	if (i915_gem_object_has_struct_page(uc_fw->obj)) {
		struct page *page;

		for_each_sgt_page(page, iter, uc_fw->obj->mm.pages) {
			u32 len = min_t(u32, size, PAGE_SIZE - offset);
			void *vaddr;

			/* Skip whole pages preceding the RSA blob */
			if (idx > 0) {
				idx--;
				continue;
			}

			/*
			 * NOTE(review): kmap_atomic() is deprecated upstream
			 * in favour of kmap_local_page(); kept as-is here
			 * since this path must not sleep.
			 */
			vaddr = kmap_atomic(page);
			memcpy(dst, vaddr + offset, len);
			kunmap_atomic(vaddr);

			/* Only the first copied page starts mid-page */
			offset = 0;
			dst += len;
			size -= len;
			count += len;
			if (!size)
				break;
		}
	} else {
		dma_addr_t addr;

		for_each_sgt_daddr(addr, iter, uc_fw->obj->mm.pages) {
			u32 len = min_t(u32, size, PAGE_SIZE - offset);
			void __iomem *vaddr;

			/* Skip whole pages preceding the RSA blob */
			if (idx > 0) {
				idx--;
				continue;
			}

			/* Access the lmem page through the region's io mapping */
			vaddr = io_mapping_map_atomic_wc(&mr->iomap,
							 addr - mr->region.start);
			memcpy_fromio(dst, vaddr + offset, len);
			io_mapping_unmap_atomic(vaddr);

			offset = 0;
			dst += len;
			size -= len;
			count += len;
			if (!size)
				break;
		}
	}

	return count;
}
1376
1377 /**
1378 * intel_uc_fw_dump - dump information about uC firmware
1379 * @uc_fw: uC firmware
1380 * @p: the &drm_printer
1381 *
1382 * Pretty printer for uC firmware.
1383 */
intel_uc_fw_dump(const struct intel_uc_fw * uc_fw,struct drm_printer * p)1384 void intel_uc_fw_dump(const struct intel_uc_fw *uc_fw, struct drm_printer *p)
1385 {
1386 bool got_wanted;
1387
1388 drm_printf(p, "%s firmware: %s\n",
1389 intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path);
1390 if (uc_fw->file_selected.path != uc_fw->file_wanted.path)
1391 drm_printf(p, "%s firmware wanted: %s\n",
1392 intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_wanted.path);
1393 drm_printf(p, "\tstatus: %s\n",
1394 intel_uc_fw_status_repr(uc_fw->status));
1395
1396 if (uc_fw->file_selected.ver.major < uc_fw->file_wanted.ver.major)
1397 got_wanted = false;
1398 else if ((uc_fw->file_selected.ver.major == uc_fw->file_wanted.ver.major) &&
1399 (uc_fw->file_selected.ver.minor < uc_fw->file_wanted.ver.minor))
1400 got_wanted = false;
1401 else if ((uc_fw->file_selected.ver.major == uc_fw->file_wanted.ver.major) &&
1402 (uc_fw->file_selected.ver.minor == uc_fw->file_wanted.ver.minor) &&
1403 (uc_fw->file_selected.ver.patch < uc_fw->file_wanted.ver.patch))
1404 got_wanted = false;
1405 else
1406 got_wanted = true;
1407
1408 if (!got_wanted)
1409 drm_printf(p, "\tversion: wanted %u.%u.%u, found %u.%u.%u\n",
1410 uc_fw->file_wanted.ver.major,
1411 uc_fw->file_wanted.ver.minor,
1412 uc_fw->file_wanted.ver.patch,
1413 uc_fw->file_selected.ver.major,
1414 uc_fw->file_selected.ver.minor,
1415 uc_fw->file_selected.ver.patch);
1416 else
1417 drm_printf(p, "\tversion: found %u.%u.%u\n",
1418 uc_fw->file_selected.ver.major,
1419 uc_fw->file_selected.ver.minor,
1420 uc_fw->file_selected.ver.patch);
1421 drm_printf(p, "\tuCode: %u bytes\n", uc_fw->ucode_size);
1422 drm_printf(p, "\tRSA: %u bytes\n", uc_fw->rsa_size);
1423 }
1424