1 // SPDX-License-Identifier: MIT
2 /*
3 * Copyright © 2014-2018 Intel Corporation
4 */
5
6 #include "i915_drv.h"
7 #include "i915_reg.h"
8 #include "intel_context.h"
9 #include "intel_engine_pm.h"
10 #include "intel_engine_regs.h"
11 #include "intel_gpu_commands.h"
12 #include "intel_gt.h"
13 #include "intel_gt_mcr.h"
14 #include "intel_gt_regs.h"
15 #include "intel_ring.h"
16 #include "intel_workarounds.h"
17
18 /**
19 * DOC: Hardware workarounds
20 *
21 * Hardware workarounds are register programming documented to be executed in
22 * the driver that fall outside of the normal programming sequences for a
23 * platform. There are some basic categories of workarounds, depending on
24 * how/when they are applied:
25 *
26 * - Context workarounds: workarounds that touch registers that are
27 * saved/restored to/from the HW context image. The list is emitted (via Load
28 * Register Immediate commands) once when initializing the device and saved in
29 * the default context. That default context is then used on every context
30 * creation to have a "primed golden context", i.e. a context image that
31 * already contains the changes needed to all the registers.
32 *
33 * Context workarounds should be implemented in the \*_ctx_workarounds_init()
34 * variants respective to the targeted platforms.
35 *
36 * - Engine workarounds: the list of these WAs is applied whenever the specific
37 * engine is reset. It's also possible that a set of engine classes share a
38 * common power domain and they are reset together. This happens on some
39 * platforms with render and compute engines. In this case (at least) one of
40 * them needs to keep the workaround programming: the approach taken in the
41 * driver is to tie those workarounds to the first compute/render engine that
42 * is registered. When executing with GuC submission, engine resets are
43 * outside of kernel driver control, hence the list of registers involved is
44 * written once, on engine initialization, and then passed to GuC, which
45 * saves/restores their values before/after the reset takes place. See
46 * ``drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c`` for reference.
47 *
48 * Workarounds for registers specific to RCS and CCS should be implemented in
49 * rcs_engine_wa_init() and ccs_engine_wa_init(), respectively; those for
50 * registers belonging to BCS, VCS or VECS should be implemented in
51 * xcs_engine_wa_init(). Workarounds for registers not belonging to a specific
52 * engine's MMIO range but that are part of the common RCS/CCS reset domain
53 * should be implemented in general_render_compute_wa_init().
54 *
55 * - GT workarounds: the list of these WAs is applied whenever these registers
56 * revert to their default values: on GPU reset, suspend/resume [1]_, etc.
57 *
58 * GT workarounds should be implemented in the \*_gt_workarounds_init()
59 * variants respective to the targeted platforms.
60 *
61 * - Register whitelist: some workarounds need to be implemented in userspace,
62 * but need to touch privileged registers. The whitelist in the kernel
63 * instructs the hardware to allow the access to happen. From the kernel side,
64 * this is just a special case of an MMIO workaround (as we write the list of
65 * these to-be-whitelisted registers to some special HW registers).
66 *
67 * Register whitelisting should be done in the \*_whitelist_build() variants
68 * respective to the targeted platforms.
69 *
70 * - Workaround batchbuffers: buffers that get executed automatically by the
71 * hardware on every HW context restore. These buffers are created and
72 * programmed in the default context so the hardware always goes through those
73 * programming sequences when switching contexts. The support for workaround
74 * batchbuffers is enabled by these hardware mechanisms:
75 *
76 * #. INDIRECT_CTX: A batchbuffer and an offset are provided in the default
77 * context, pointing the hardware to jump to that location when that offset
78 * is reached in the context restore. Workaround batchbuffer in the driver
79 * currently uses this mechanism for all platforms.
80 *
81 * #. BB_PER_CTX_PTR: A batchbuffer is provided in the default context,
82 * pointing the hardware to a buffer to continue executing after the
83 * engine registers are restored in a context restore sequence. This is
84 * currently not used in the driver.
85 *
86 * - Other: There are WAs that, due to their nature, cannot be applied from a
87 * central place. Those are peppered around the rest of the code, as needed.
88 * Workarounds related to the display IP are the main example.
89 *
90 * .. [1] Technically, some registers are powercontext saved & restored, so they
91 * survive a suspend/resume. In practice, writing them again is not too
92 * costly and simplifies things, so it's the approach taken in the driver.
93 */
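/*
 * Illustrative sketch (not taken from any real platform, register names are
 * hypothetical): a context workaround list is typically built from the small
 * helpers defined below, e.g.
 *
 *	wa_masked_en(wal, EXAMPLE_CHICKEN_REG, EXAMPLE_CHICKEN_BIT);
 *	wa_masked_field_set(wal, EXAMPLE_MODE_REG, EXAMPLE_MASK, EXAMPLE_VAL);
 *
 * and is later emitted as MI_LOAD_REGISTER_IMM commands into the default
 * context by intel_engine_emit_ctx_wa().
 */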
94
95 static void wa_init_start(struct i915_wa_list *wal, struct intel_gt *gt,
96 const char *name, const char *engine_name)
97 {
98 wal->gt = gt;
99 wal->name = name;
100 wal->engine_name = engine_name;
101 }
102
103 #define WA_LIST_CHUNK (1 << 4)
104
105 static void wa_init_finish(struct i915_wa_list *wal)
106 {
107 /* Trim unused entries. */
108 if (!IS_ALIGNED(wal->count, WA_LIST_CHUNK)) {
109 struct i915_wa *list = kmemdup(wal->list,
110 wal->count * sizeof(*list),
111 GFP_KERNEL);
112
113 if (list) {
114 kfree(wal->list);
115 wal->list = list;
116 }
117 }
118
119 if (!wal->count)
120 return;
121
122 drm_dbg(&wal->gt->i915->drm, "Initialized %u %s workarounds on %s\n",
123 wal->wa_count, wal->name, wal->engine_name);
124 }
125
126 static enum forcewake_domains
127 wal_get_fw_for_rmw(struct intel_uncore *uncore, const struct i915_wa_list *wal)
128 {
129 enum forcewake_domains fw = 0;
130 struct i915_wa *wa;
131 unsigned int i;
132
133 for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
134 fw |= intel_uncore_forcewake_for_reg(uncore,
135 wa->reg,
136 FW_REG_READ |
137 FW_REG_WRITE);
138
139 return fw;
140 }
141
142 static void _wa_add(struct i915_wa_list *wal, const struct i915_wa *wa)
143 {
144 unsigned int addr = i915_mmio_reg_offset(wa->reg);
145 struct drm_i915_private *i915 = wal->gt->i915;
146 unsigned int start = 0, end = wal->count;
147 const unsigned int grow = WA_LIST_CHUNK;
148 struct i915_wa *wa_;
149
150 GEM_BUG_ON(!is_power_of_2(grow));
151
152 if (IS_ALIGNED(wal->count, grow)) { /* Either uninitialized or full. */
153 struct i915_wa *list;
154
155 list = kmalloc_array(ALIGN(wal->count + 1, grow), sizeof(*wa),
156 GFP_KERNEL);
157 if (!list) {
158 drm_err(&i915->drm, "No space for workaround init!\n");
159 return;
160 }
161
162 if (wal->list) {
163 memcpy(list, wal->list, sizeof(*wa) * wal->count);
164 kfree(wal->list);
165 }
166
167 wal->list = list;
168 }
169
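/*
 * Binary search for an existing entry with the same register offset; if one
 * is found, the clear/set/read masks are merged below instead of adding a
 * duplicate entry.
 */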
170 while (start < end) {
171 unsigned int mid = start + (end - start) / 2;
172
173 if (i915_mmio_reg_offset(wal->list[mid].reg) < addr) {
174 start = mid + 1;
175 } else if (i915_mmio_reg_offset(wal->list[mid].reg) > addr) {
176 end = mid;
177 } else {
178 wa_ = &wal->list[mid];
179
180 if ((wa->clr | wa_->clr) && !(wa->clr & ~wa_->clr)) {
181 drm_err(&i915->drm,
182 "Discarding overwritten w/a for reg %04x (clear: %08x, set: %08x)\n",
183 i915_mmio_reg_offset(wa_->reg),
184 wa_->clr, wa_->set);
185
186 wa_->set &= ~wa->clr;
187 }
188
189 wal->wa_count++;
190 wa_->set |= wa->set;
191 wa_->clr |= wa->clr;
192 wa_->read |= wa->read;
193 return;
194 }
195 }
196
197 wal->wa_count++;
198 wa_ = &wal->list[wal->count++];
199 *wa_ = *wa;
200
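/*
 * Keep the list sorted by register offset: bubble the newly appended entry
 * backwards until it is in place.
 */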
201 while (wa_-- > wal->list) {
202 GEM_BUG_ON(i915_mmio_reg_offset(wa_[0].reg) ==
203 i915_mmio_reg_offset(wa_[1].reg));
204 if (i915_mmio_reg_offset(wa_[1].reg) >
205 i915_mmio_reg_offset(wa_[0].reg))
206 break;
207
208 swap(wa_[1], wa_[0]);
209 }
210 }
211
212 static void wa_add(struct i915_wa_list *wal, i915_reg_t reg,
213 u32 clear, u32 set, u32 read_mask, bool masked_reg)
214 {
215 struct i915_wa wa = {
216 .reg = reg,
217 .clr = clear,
218 .set = set,
219 .read = read_mask,
220 .masked_reg = masked_reg,
221 };
222
223 _wa_add(wal, &wa);
224 }
225
226 static void wa_mcr_add(struct i915_wa_list *wal, i915_mcr_reg_t reg,
227 u32 clear, u32 set, u32 read_mask, bool masked_reg)
228 {
229 struct i915_wa wa = {
230 .mcr_reg = reg,
231 .clr = clear,
232 .set = set,
233 .read = read_mask,
234 .masked_reg = masked_reg,
235 .is_mcr = 1,
236 };
237
238 _wa_add(wal, &wa);
239 }
240
241 static void
242 wa_write_clr_set(struct i915_wa_list *wal, i915_reg_t reg, u32 clear, u32 set)
243 {
244 wa_add(wal, reg, clear, set, clear | set, false);
245 }
246
247 static void
248 wa_mcr_write_clr_set(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 clear, u32 set)
249 {
250 wa_mcr_add(wal, reg, clear, set, clear | set, false);
251 }
252
253 static void
254 wa_write(struct i915_wa_list *wal, i915_reg_t reg, u32 set)
255 {
256 wa_write_clr_set(wal, reg, ~0, set);
257 }
258
259 static void
260 wa_mcr_write(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 set)
261 {
262 wa_mcr_write_clr_set(wal, reg, ~0, set);
263 }
264
265 static void
266 wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 set)
267 {
268 wa_write_clr_set(wal, reg, set, set);
269 }
270
271 static void
272 wa_mcr_write_or(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 set)
273 {
274 wa_mcr_write_clr_set(wal, reg, set, set);
275 }
276
277 static void
278 wa_write_clr(struct i915_wa_list *wal, i915_reg_t reg, u32 clr)
279 {
280 wa_write_clr_set(wal, reg, clr, 0);
281 }
282
283 static void
284 wa_mcr_write_clr(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 clr)
285 {
286 wa_mcr_write_clr_set(wal, reg, clr, 0);
287 }
288
289 /*
290 * WA operations on "masked register". A masked register has the upper 16 bits
291 * documented as "masked" in b-spec. Its purpose is to allow writing to just a
292 * portion of the register without a rmw: you simply write in the upper 16 bits
293 * the mask of bits you are going to modify.
294 *
295 * The wa_masked_* family of functions already does the necessary operations to
296 * calculate the mask based on the parameters passed, so user only has to
297 * provide the lower 16 bits of that register.
298 */
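/*
 * For illustration (assuming the standard i915 masked-register macros): a
 * masked write enabling bit 3 is encoded as (BIT(3) << 16) | BIT(3), i.e.
 *
 *	_MASKED_BIT_ENABLE(BIT(3))  == 0x00080008
 *	_MASKED_BIT_DISABLE(BIT(3)) == 0x00080000
 *
 * so only bit 3 of the register is affected by the write.
 */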
299
300 static void
301 wa_masked_en(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
302 {
303 wa_add(wal, reg, 0, _MASKED_BIT_ENABLE(val), val, true);
304 }
305
306 static void
307 wa_mcr_masked_en(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 val)
308 {
309 wa_mcr_add(wal, reg, 0, _MASKED_BIT_ENABLE(val), val, true);
310 }
311
312 static void
313 wa_masked_dis(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
314 {
315 wa_add(wal, reg, 0, _MASKED_BIT_DISABLE(val), val, true);
316 }
317
318 static void
319 wa_mcr_masked_dis(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 val)
320 {
321 wa_mcr_add(wal, reg, 0, _MASKED_BIT_DISABLE(val), val, true);
322 }
323
324 static void
325 wa_masked_field_set(struct i915_wa_list *wal, i915_reg_t reg,
326 u32 mask, u32 val)
327 {
328 wa_add(wal, reg, 0, _MASKED_FIELD(mask, val), mask, true);
329 }
330
331 static void
332 wa_mcr_masked_field_set(struct i915_wa_list *wal, i915_mcr_reg_t reg,
333 u32 mask, u32 val)
334 {
335 wa_mcr_add(wal, reg, 0, _MASKED_FIELD(mask, val), mask, true);
336 }
337
338 static void gen6_ctx_workarounds_init(struct intel_engine_cs *engine,
339 struct i915_wa_list *wal)
340 {
341 wa_masked_en(wal, INSTPM, INSTPM_FORCE_ORDERING);
342 }
343
344 static void gen7_ctx_workarounds_init(struct intel_engine_cs *engine,
345 struct i915_wa_list *wal)
346 {
347 wa_masked_en(wal, INSTPM, INSTPM_FORCE_ORDERING);
348 }
349
350 static void gen8_ctx_workarounds_init(struct intel_engine_cs *engine,
351 struct i915_wa_list *wal)
352 {
353 wa_masked_en(wal, INSTPM, INSTPM_FORCE_ORDERING);
354
355 /* WaDisableAsyncFlipPerfMode:bdw,chv */
356 wa_masked_en(wal, RING_MI_MODE(RENDER_RING_BASE), ASYNC_FLIP_PERF_DISABLE);
357
358 /* WaDisablePartialInstShootdown:bdw,chv */
359 wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN,
360 PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
361
362 /* Use Force Non-Coherent whenever executing a 3D context. This is a
363 * workaround for a possible hang in the unlikely event a TLB
364 * invalidation occurs during a PSD flush.
365 */
366 /* WaForceEnableNonCoherent:bdw,chv */
367 /* WaHdcDisableFetchWhenMasked:bdw,chv */
368 wa_masked_en(wal, HDC_CHICKEN0,
369 HDC_DONOT_FETCH_MEM_WHEN_MASKED |
370 HDC_FORCE_NON_COHERENT);
371
372 /* From the Haswell PRM, Command Reference: Registers, CACHE_MODE_0:
373 * "The Hierarchical Z RAW Stall Optimization allows non-overlapping
374 * polygons in the same 8x4 pixel/sample area to be processed without
375 * stalling waiting for the earlier ones to write to Hierarchical Z
376 * buffer."
377 *
378 * This optimization is off by default for BDW and CHV; turn it on.
379 */
380 wa_masked_dis(wal, CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);
381
382 /* Wa4x4STCOptimizationDisable:bdw,chv */
383 wa_masked_en(wal, CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE);
384
385 /*
386 * BSpec recommends 8x4 when MSAA is used,
387 * however in practice 16x4 seems fastest.
388 *
389 * Note that PS/WM thread counts depend on the WIZ hashing
390 * disable bit, which we don't touch here, but it's good
391 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
392 */
393 wa_masked_field_set(wal, GEN7_GT_MODE,
394 GEN6_WIZ_HASHING_MASK,
395 GEN6_WIZ_HASHING_16x4);
396 }
397
398 static void bdw_ctx_workarounds_init(struct intel_engine_cs *engine,
399 struct i915_wa_list *wal)
400 {
401 struct drm_i915_private *i915 = engine->i915;
402
403 gen8_ctx_workarounds_init(engine, wal);
404
405 /* WaDisableThreadStallDopClockGating:bdw (pre-production) */
406 wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
407
408 /* WaDisableDopClockGating:bdw
409 *
410 * Also see the related UCGTCL1 write in bdw_init_clock_gating()
411 * to disable EUTC clock gating.
412 */
413 wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN2,
414 DOP_CLOCK_GATING_DISABLE);
415
416 wa_mcr_masked_en(wal, GEN8_HALF_SLICE_CHICKEN3,
417 GEN8_SAMPLER_POWER_BYPASS_DIS);
418
419 wa_masked_en(wal, HDC_CHICKEN0,
420 /* WaForceContextSaveRestoreNonCoherent:bdw */
421 HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
422 /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
423 (IS_BROADWELL_GT3(i915) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
424 }
425
426 static void chv_ctx_workarounds_init(struct intel_engine_cs *engine,
427 struct i915_wa_list *wal)
428 {
429 gen8_ctx_workarounds_init(engine, wal);
430
431 /* WaDisableThreadStallDopClockGating:chv */
432 wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
433
434 /* Improve HiZ throughput on CHV. */
435 wa_masked_en(wal, HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);
436 }
437
438 static void gen9_ctx_workarounds_init(struct intel_engine_cs *engine,
439 struct i915_wa_list *wal)
440 {
441 struct drm_i915_private *i915 = engine->i915;
442
443 if (HAS_LLC(i915)) {
444 /* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl
445 *
446 * Must match Display Engine. See
447 * WaCompressedResourceDisplayNewHashMode.
448 */
449 wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
450 GEN9_PBE_COMPRESSED_HASH_SELECTION);
451 wa_mcr_masked_en(wal, GEN9_HALF_SLICE_CHICKEN7,
452 GEN9_SAMPLER_HASH_COMPRESSED_READ_ADDR);
453 }
454
455 /* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl,glk,cfl */
456 /* WaDisablePartialInstShootdown:skl,bxt,kbl,glk,cfl */
457 wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN,
458 FLOW_CONTROL_ENABLE |
459 PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
460
461 /* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl,glk,cfl */
462 /* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl,cfl */
463 wa_mcr_masked_en(wal, GEN9_HALF_SLICE_CHICKEN7,
464 GEN9_ENABLE_YV12_BUGFIX |
465 GEN9_ENABLE_GPGPU_PREEMPTION);
466
467 /* Wa4x4STCOptimizationDisable:skl,bxt,kbl,glk,cfl */
468 /* WaDisablePartialResolveInVc:skl,bxt,kbl,cfl */
469 wa_masked_en(wal, CACHE_MODE_1,
470 GEN8_4x4_STC_OPTIMIZATION_DISABLE |
471 GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE);
472
473 /* WaCcsTlbPrefetchDisable:skl,bxt,kbl,glk,cfl */
474 wa_mcr_masked_dis(wal, GEN9_HALF_SLICE_CHICKEN5,
475 GEN9_CCS_TLB_PREFETCH_ENABLE);
476
477 /* WaForceContextSaveRestoreNonCoherent:skl,bxt,kbl,cfl */
478 wa_masked_en(wal, HDC_CHICKEN0,
479 HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
480 HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE);
481
482 /* WaForceEnableNonCoherent and WaDisableHDCInvalidation are
483 * both tied to WaForceContextSaveRestoreNonCoherent
484 * in some hsds for skl. We keep the tie for all gen9. The
485 * documentation is a bit hazy and so we want to get common behaviour,
486 * even though there is no clear evidence we would need both on kbl/bxt.
487 * This area has been a source of system hangs so we play it safe
488 * and mimic the skl regardless of what bspec says.
489 *
490 * Use Force Non-Coherent whenever executing a 3D context. This
491 * is a workaround for a possible hang in the unlikely event
492 * a TLB invalidation occurs during a PSD flush.
493 */
494
495 /* WaForceEnableNonCoherent:skl,bxt,kbl,cfl */
496 wa_masked_en(wal, HDC_CHICKEN0,
497 HDC_FORCE_NON_COHERENT);
498
499 /* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl,cfl */
500 if (IS_SKYLAKE(i915) ||
501 IS_KABYLAKE(i915) ||
502 IS_COFFEELAKE(i915) ||
503 IS_COMETLAKE(i915))
504 wa_mcr_masked_en(wal, GEN8_HALF_SLICE_CHICKEN3,
505 GEN8_SAMPLER_POWER_BYPASS_DIS);
506
507 /* WaDisableSTUnitPowerOptimization:skl,bxt,kbl,glk,cfl */
508 wa_mcr_masked_en(wal, HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);
509
510 /*
511 * Supporting preemption with fine-granularity requires changes in the
512 * batch buffer programming. Since we can't break old userspace, we
513 * need to set our default preemption level to a safe value. Userspace is
514 * still able to use more fine-grained preemption levels, since in
515 * WaEnablePreemptionGranularityControlByUMD we're whitelisting the
516 * per-ctx register. As such, WaDisable{3D,GPGPU}MidCmdPreemption are
517 * not real HW workarounds, but merely a way to start using preemption
518 * while maintaining old contract with userspace.
519 */
520
521 /* WaDisable3DMidCmdPreemption:skl,bxt,glk,cfl,[cnl] */
522 wa_masked_dis(wal, GEN8_CS_CHICKEN1, GEN9_PREEMPT_3D_OBJECT_LEVEL);
523
524 /* WaDisableGPGPUMidCmdPreemption:skl,bxt,glk,cfl,[cnl] */
525 wa_masked_field_set(wal, GEN8_CS_CHICKEN1,
526 GEN9_PREEMPT_GPGPU_LEVEL_MASK,
527 GEN9_PREEMPT_GPGPU_COMMAND_LEVEL);
528
529 /* WaClearHIZ_WM_CHICKEN3:bxt,glk */
530 if (IS_GEN9_LP(i915))
531 wa_masked_en(wal, GEN9_WM_CHICKEN3, GEN9_FACTOR_IN_CLR_VAL_HIZ);
532 }
533
534 static void skl_tune_iz_hashing(struct intel_engine_cs *engine,
535 struct i915_wa_list *wal)
536 {
537 struct intel_gt *gt = engine->gt;
538 u8 vals[3] = { 0, 0, 0 };
539 unsigned int i;
540
541 for (i = 0; i < 3; i++) {
542 u8 ss;
543
544 /*
545 * Only consider slices where one, and only one, subslice has 7
546 * EUs
547 */
548 if (!is_power_of_2(gt->info.sseu.subslice_7eu[i]))
549 continue;
550
551 /*
552 * subslice_7eu[i] != 0 (because of the check above) and
553 * ss_max == 4 (maximum number of subslices possible per slice)
554 *
555 * -> 0 <= ss <= 3;
556 */
557 ss = ffs(gt->info.sseu.subslice_7eu[i]) - 1;
558 vals[i] = 3 - ss;
559 }
560
561 if (vals[0] == 0 && vals[1] == 0 && vals[2] == 0)
562 return;
563
564 /* Tune IZ hashing. See intel_device_info_runtime_init() */
565 wa_masked_field_set(wal, GEN7_GT_MODE,
566 GEN9_IZ_HASHING_MASK(2) |
567 GEN9_IZ_HASHING_MASK(1) |
568 GEN9_IZ_HASHING_MASK(0),
569 GEN9_IZ_HASHING(2, vals[2]) |
570 GEN9_IZ_HASHING(1, vals[1]) |
571 GEN9_IZ_HASHING(0, vals[0]));
572 }
573
574 static void skl_ctx_workarounds_init(struct intel_engine_cs *engine,
575 struct i915_wa_list *wal)
576 {
577 gen9_ctx_workarounds_init(engine, wal);
578 skl_tune_iz_hashing(engine, wal);
579 }
580
581 static void bxt_ctx_workarounds_init(struct intel_engine_cs *engine,
582 struct i915_wa_list *wal)
583 {
584 gen9_ctx_workarounds_init(engine, wal);
585
586 /* WaDisableThreadStallDopClockGating:bxt */
587 wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN,
588 STALL_DOP_GATING_DISABLE);
589
590 /* WaToEnableHwFixForPushConstHWBug:bxt */
591 wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
592 GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
593 }
594
595 static void kbl_ctx_workarounds_init(struct intel_engine_cs *engine,
596 struct i915_wa_list *wal)
597 {
598 struct drm_i915_private *i915 = engine->i915;
599
600 gen9_ctx_workarounds_init(engine, wal);
601
602 /* WaToEnableHwFixForPushConstHWBug:kbl */
603 if (IS_KABYLAKE(i915) && IS_GRAPHICS_STEP(i915, STEP_C0, STEP_FOREVER))
604 wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
605 GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
606
607 /* WaDisableSbeCacheDispatchPortSharing:kbl */
608 wa_mcr_masked_en(wal, GEN8_HALF_SLICE_CHICKEN1,
609 GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
610 }
611
612 static void glk_ctx_workarounds_init(struct intel_engine_cs *engine,
613 struct i915_wa_list *wal)
614 {
615 gen9_ctx_workarounds_init(engine, wal);
616
617 /* WaToEnableHwFixForPushConstHWBug:glk */
618 wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
619 GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
620 }
621
622 static void cfl_ctx_workarounds_init(struct intel_engine_cs *engine,
623 struct i915_wa_list *wal)
624 {
625 gen9_ctx_workarounds_init(engine, wal);
626
627 /* WaToEnableHwFixForPushConstHWBug:cfl */
628 wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
629 GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
630
631 /* WaDisableSbeCacheDispatchPortSharing:cfl */
632 wa_mcr_masked_en(wal, GEN8_HALF_SLICE_CHICKEN1,
633 GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
634 }
635
636 static void icl_ctx_workarounds_init(struct intel_engine_cs *engine,
637 struct i915_wa_list *wal)
638 {
639 /* Wa_1406697149 (WaDisableBankHangMode:icl) */
640 wa_write(wal, GEN8_L3CNTLREG, GEN8_ERRDETBCTRL);
641
642 /* WaForceEnableNonCoherent:icl
643 * This is not the same workaround as in early Gen9 platforms, where
644 * lacking this could cause system hangs, but coherency performance
645 * overhead is high and only a few compute workloads really need it
646 * (the register is whitelisted in hardware now, so UMDs can opt in
647 * for coherency if they have a good reason).
648 */
649 wa_mcr_masked_en(wal, ICL_HDC_MODE, HDC_FORCE_NON_COHERENT);
650
651 /* WaEnableFloatBlendOptimization:icl */
652 wa_mcr_add(wal, GEN10_CACHE_MODE_SS, 0,
653 _MASKED_BIT_ENABLE(FLOAT_BLEND_OPTIMIZATION_ENABLE),
654 0 /* write-only, so skip validation */,
655 true);
656
657 /* WaDisableGPGPUMidThreadPreemption:icl */
658 wa_masked_field_set(wal, GEN8_CS_CHICKEN1,
659 GEN9_PREEMPT_GPGPU_LEVEL_MASK,
660 GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL);
661
662 /* allow headerless messages for preemptible GPGPU context */
663 wa_mcr_masked_en(wal, GEN10_SAMPLER_MODE,
664 GEN11_SAMPLER_ENABLE_HEADLESS_MSG);
665
666 /* Wa_1604278689:icl,ehl */
667 wa_write(wal, IVB_FBC_RT_BASE, 0xFFFFFFFF & ~ILK_FBC_RT_VALID);
668 wa_write_clr_set(wal, IVB_FBC_RT_BASE_UPPER,
669 0,
670 0xFFFFFFFF);
671
672 /* Wa_1406306137:icl,ehl */
673 wa_mcr_masked_en(wal, GEN9_ROW_CHICKEN4, GEN11_DIS_PICK_2ND_EU);
674 }
675
676 /*
677 * These settings aren't actually workarounds, but general tuning settings that
678 * need to be programmed on dg2 platform.
679 */
680 static void dg2_ctx_gt_tuning_init(struct intel_engine_cs *engine,
681 struct i915_wa_list *wal)
682 {
683 wa_mcr_masked_en(wal, CHICKEN_RASTER_2, TBIMR_FAST_CLIP);
684 wa_mcr_write_clr_set(wal, XEHP_L3SQCREG5, L3_PWM_TIMER_INIT_VAL_MASK,
685 REG_FIELD_PREP(L3_PWM_TIMER_INIT_VAL_MASK, 0x7f));
686 wa_mcr_write_clr_set(wal, XEHP_FF_MODE2, FF_MODE2_TDS_TIMER_MASK,
687 FF_MODE2_TDS_TIMER_128);
688 }
689
690 static void gen12_ctx_workarounds_init(struct intel_engine_cs *engine,
691 struct i915_wa_list *wal)
692 {
693 struct drm_i915_private *i915 = engine->i915;
694
695 /*
696 * Wa_1409142259:tgl,dg1,adl-p
697 * Wa_1409347922:tgl,dg1,adl-p
698 * Wa_1409252684:tgl,dg1,adl-p
699 * Wa_1409217633:tgl,dg1,adl-p
700 * Wa_1409207793:tgl,dg1,adl-p
701 * Wa_1409178076:tgl,dg1,adl-p
702 * Wa_1408979724:tgl,dg1,adl-p
703 * Wa_14010443199:tgl,rkl,dg1,adl-p
704 * Wa_14010698770:tgl,rkl,dg1,adl-s,adl-p
705 * Wa_1409342910:tgl,rkl,dg1,adl-s,adl-p
706 */
707 wa_masked_en(wal, GEN11_COMMON_SLICE_CHICKEN3,
708 GEN12_DISABLE_CPS_AWARE_COLOR_PIPE);
709
710 /* WaDisableGPGPUMidThreadPreemption:gen12 */
711 wa_masked_field_set(wal, GEN8_CS_CHICKEN1,
712 GEN9_PREEMPT_GPGPU_LEVEL_MASK,
713 GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL);
714
715 /*
716 * Wa_16011163337 - GS_TIMER
717 *
718 * TDS_TIMER: Although some platforms refer to it as Wa_1604555607, we
719 * need to program it even on those that don't explicitly list that
720 * workaround.
721 *
722 * Note that the programming of GEN12_FF_MODE2 is further modified
723 * according to the FF_MODE2 guidance given by Wa_1608008084.
724 * Wa_1608008084 tells us the FF_MODE2 register will return the wrong
725 * value when read from the CPU.
726 *
727 * The default value for this register is zero for all fields.
728 * So instead of doing a RMW we should just write the desired values
729 * for TDS and GS timers. Note that since the readback can't be trusted,
730 * the clear mask is just set to ~0 to make sure other bits are not
731 * inadvertently set. For the same reason read verification is ignored.
732 */
733 wa_add(wal,
734 GEN12_FF_MODE2,
735 ~0,
736 FF_MODE2_TDS_TIMER_128 | FF_MODE2_GS_TIMER_224,
737 0, false);
738
739 if (!IS_DG1(i915)) {
740 /* Wa_1806527549 */
741 wa_masked_en(wal, HIZ_CHICKEN, HZ_DEPTH_TEST_LE_GE_OPT_DISABLE);
742
743 /* Wa_1606376872 */
744 wa_masked_en(wal, COMMON_SLICE_CHICKEN4, DISABLE_TDC_LOAD_BALANCING_CALC);
745 }
746 }
747
748 static void dg1_ctx_workarounds_init(struct intel_engine_cs *engine,
749 struct i915_wa_list *wal)
750 {
751 gen12_ctx_workarounds_init(engine, wal);
752
753 /* Wa_1409044764 */
754 wa_masked_dis(wal, GEN11_COMMON_SLICE_CHICKEN3,
755 DG1_FLOAT_POINT_BLEND_OPT_STRICT_MODE_EN);
756
757 /* Wa_22010493298 */
758 wa_masked_en(wal, HIZ_CHICKEN,
759 DG1_HZ_READ_SUPPRESSION_OPTIMIZATION_DISABLE);
760 }
761
762 static void dg2_ctx_workarounds_init(struct intel_engine_cs *engine,
763 struct i915_wa_list *wal)
764 {
765 dg2_ctx_gt_tuning_init(engine, wal);
766
767 /* Wa_16011186671:dg2_g11 */
768 if (IS_DG2_GRAPHICS_STEP(engine->i915, G11, STEP_A0, STEP_B0)) {
769 wa_mcr_masked_dis(wal, VFLSKPD, DIS_MULT_MISS_RD_SQUASH);
770 wa_mcr_masked_en(wal, VFLSKPD, DIS_OVER_FETCH_CACHE);
771 }
772
773 if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_A0, STEP_B0)) {
774 /* Wa_14010469329:dg2_g10 */
775 wa_mcr_masked_en(wal, XEHP_COMMON_SLICE_CHICKEN3,
776 XEHP_DUAL_SIMD8_SEQ_MERGE_DISABLE);
777
778 /*
779 * Wa_22010465075:dg2_g10
780 * Wa_22010613112:dg2_g10
781 * Wa_14010698770:dg2_g10
782 */
783 wa_mcr_masked_en(wal, XEHP_COMMON_SLICE_CHICKEN3,
784 GEN12_DISABLE_CPS_AWARE_COLOR_PIPE);
785 }
786
787 /* Wa_16013271637:dg2 */
788 wa_mcr_masked_en(wal, XEHP_SLICE_COMMON_ECO_CHICKEN1,
789 MSC_MSAA_REODER_BUF_BYPASS_DISABLE);
790
791 /* Wa_14014947963:dg2 */
792 if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_B0, STEP_FOREVER) ||
793 IS_DG2_G11(engine->i915) || IS_DG2_G12(engine->i915))
794 wa_masked_field_set(wal, VF_PREEMPTION, PREEMPTION_VERTEX_COUNT, 0x4000);
795
796 /* Wa_18018764978:dg2 */
797 if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_C0, STEP_FOREVER) ||
798 IS_DG2_G11(engine->i915) || IS_DG2_G12(engine->i915))
799 wa_mcr_masked_en(wal, XEHP_PSS_MODE2, SCOREBOARD_STALL_FLUSH_CONTROL);
800
801 /* Wa_15010599737:dg2 */
802 wa_mcr_masked_en(wal, CHICKEN_RASTER_1, DIS_SF_ROUND_NEAREST_EVEN);
803
804 /* Wa_18019271663:dg2 */
805 wa_masked_en(wal, CACHE_MODE_1, MSAA_OPTIMIZATION_REDUC_DISABLE);
806 }
807
808 static void mtl_ctx_gt_tuning_init(struct intel_engine_cs *engine,
809 struct i915_wa_list *wal)
810 {
811 struct drm_i915_private *i915 = engine->i915;
812
813 dg2_ctx_gt_tuning_init(engine, wal);
814
815 if (IS_MTL_GRAPHICS_STEP(i915, M, STEP_B0, STEP_FOREVER) ||
816 IS_MTL_GRAPHICS_STEP(i915, P, STEP_B0, STEP_FOREVER))
817 wa_add(wal, DRAW_WATERMARK, VERT_WM_VAL, 0x3FF, 0, false);
818 }
819
820 static void mtl_ctx_workarounds_init(struct intel_engine_cs *engine,
821 struct i915_wa_list *wal)
822 {
823 struct drm_i915_private *i915 = engine->i915;
824
825 mtl_ctx_gt_tuning_init(engine, wal);
826
827 if (IS_MTL_GRAPHICS_STEP(i915, M, STEP_A0, STEP_B0) ||
828 IS_MTL_GRAPHICS_STEP(i915, P, STEP_A0, STEP_B0)) {
829 /* Wa_14014947963 */
830 wa_masked_field_set(wal, VF_PREEMPTION,
831 PREEMPTION_VERTEX_COUNT, 0x4000);
832
833 /* Wa_16013271637 */
834 wa_mcr_masked_en(wal, XEHP_SLICE_COMMON_ECO_CHICKEN1,
835 MSC_MSAA_REODER_BUF_BYPASS_DISABLE);
836
837 /* Wa_18019627453 */
838 wa_mcr_masked_en(wal, VFLSKPD, VF_PREFETCH_TLB_DIS);
839
840 /* Wa_18018764978 */
841 wa_mcr_masked_en(wal, XEHP_PSS_MODE2, SCOREBOARD_STALL_FLUSH_CONTROL);
842 }
843
844 /* Wa_18019271663 */
845 wa_masked_en(wal, CACHE_MODE_1, MSAA_OPTIMIZATION_REDUC_DISABLE);
846 }
847
848 static void fakewa_disable_nestedbb_mode(struct intel_engine_cs *engine,
849 struct i915_wa_list *wal)
850 {
851 /*
852 * This is a "fake" workaround defined by software to ensure we
853 * maintain reliable, backward-compatible behavior for userspace with
854 * regards to how nested MI_BATCH_BUFFER_START commands are handled.
855 *
856 * The per-context setting of MI_MODE[12] determines whether the bits
857 * of a nested MI_BATCH_BUFFER_START instruction should be interpreted
858 * in the traditional manner or whether they should instead use a new
859 * tgl+ meaning that breaks backward compatibility, but allows nesting
860 * into 3rd-level batchbuffers. When this new capability was first
861 * added in TGL, it remained off by default unless a context
862 * intentionally opted in to the new behavior. However Xe_HPG now
863 * flips this on by default and requires that we explicitly opt out if
864 * we don't want the new behavior.
865 *
866 * From a SW perspective, we want to maintain the backward-compatible
867 * behavior for userspace, so we'll apply a fake workaround to set it
868 * back to the legacy behavior on platforms where the hardware default
869 * is to break compatibility. At the moment there is no Linux
870 * userspace that utilizes third-level batchbuffers, so this avoids
871 * userspace needing to make any changes; using the legacy
872 * meaning is the correct thing to do. If/when we have userspace
873 * consumers that want to utilize third-level batch nesting, we can
874 * provide a context parameter to allow them to opt-in.
875 */
876 wa_masked_dis(wal, RING_MI_MODE(engine->mmio_base), TGL_NESTED_BB_EN);
877 }
878
879 static void gen12_ctx_gt_mocs_init(struct intel_engine_cs *engine,
880 struct i915_wa_list *wal)
881 {
882 u8 mocs;
883
884 /*
885 * Some blitter commands do not have a field for MOCS; those
886 * commands will use the MOCS index pointed to by BLIT_CCTL.
887 * BLIT_CCTL registers need to be programmed to un-cached.
888 */
889 if (engine->class == COPY_ENGINE_CLASS) {
890 mocs = engine->gt->mocs.uc_index;
891 wa_write_clr_set(wal,
892 BLIT_CCTL(engine->mmio_base),
893 BLIT_CCTL_MASK,
894 BLIT_CCTL_MOCS(mocs, mocs));
895 }
896 }
897
898 /*
899 * gen12_ctx_gt_fake_wa_init() doesn't program an official workaround
900 * defined by the hardware team, but it programs general context registers.
901 * Adding that context register programming to the context workaround list
902 * allows us to use the wa framework for proper application and validation.
903 */
904 static void
905 gen12_ctx_gt_fake_wa_init(struct intel_engine_cs *engine,
906 struct i915_wa_list *wal)
907 {
908 if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55))
909 fakewa_disable_nestedbb_mode(engine, wal);
910
911 gen12_ctx_gt_mocs_init(engine, wal);
912 }
913
914 static void
915 __intel_engine_init_ctx_wa(struct intel_engine_cs *engine,
916 struct i915_wa_list *wal,
917 const char *name)
918 {
919 struct drm_i915_private *i915 = engine->i915;
920
921 wa_init_start(wal, engine->gt, name, engine->name);
922
923 /* Applies to all engines */
924 /*
925 * Fake workarounds are not actual hardware workarounds, but
926 * programming of context registers using the workaround framework.
927 */
928 if (GRAPHICS_VER(i915) >= 12)
929 gen12_ctx_gt_fake_wa_init(engine, wal);
930
931 if (engine->class != RENDER_CLASS)
932 goto done;
933
934 if (IS_METEORLAKE(i915))
935 mtl_ctx_workarounds_init(engine, wal);
936 else if (IS_PONTEVECCHIO(i915))
937 ; /* noop; none at this time */
938 else if (IS_DG2(i915))
939 dg2_ctx_workarounds_init(engine, wal);
940 else if (IS_XEHPSDV(i915))
941 ; /* noop; none at this time */
942 else if (IS_DG1(i915))
943 dg1_ctx_workarounds_init(engine, wal);
944 else if (GRAPHICS_VER(i915) == 12)
945 gen12_ctx_workarounds_init(engine, wal);
946 else if (GRAPHICS_VER(i915) == 11)
947 icl_ctx_workarounds_init(engine, wal);
948 else if (IS_COFFEELAKE(i915) || IS_COMETLAKE(i915))
949 cfl_ctx_workarounds_init(engine, wal);
950 else if (IS_GEMINILAKE(i915))
951 glk_ctx_workarounds_init(engine, wal);
952 else if (IS_KABYLAKE(i915))
953 kbl_ctx_workarounds_init(engine, wal);
954 else if (IS_BROXTON(i915))
955 bxt_ctx_workarounds_init(engine, wal);
956 else if (IS_SKYLAKE(i915))
957 skl_ctx_workarounds_init(engine, wal);
958 else if (IS_CHERRYVIEW(i915))
959 chv_ctx_workarounds_init(engine, wal);
960 else if (IS_BROADWELL(i915))
961 bdw_ctx_workarounds_init(engine, wal);
962 else if (GRAPHICS_VER(i915) == 7)
963 gen7_ctx_workarounds_init(engine, wal);
964 else if (GRAPHICS_VER(i915) == 6)
965 gen6_ctx_workarounds_init(engine, wal);
966 else if (GRAPHICS_VER(i915) < 8)
967 ;
968 else
969 MISSING_CASE(GRAPHICS_VER(i915));
970
971 done:
972 wa_init_finish(wal);
973 }
974
975 void intel_engine_init_ctx_wa(struct intel_engine_cs *engine)
976 {
977 __intel_engine_init_ctx_wa(engine, &engine->ctx_wa_list, "context");
978 }
979
980 int intel_engine_emit_ctx_wa(struct i915_request *rq)
981 {
982 struct i915_wa_list *wal = &rq->engine->ctx_wa_list;
983 struct intel_uncore *uncore = rq->engine->uncore;
984 enum forcewake_domains fw;
985 unsigned long flags;
986 struct i915_wa *wa;
987 unsigned int i;
988 u32 *cs;
989 int ret;
990
991 if (wal->count == 0)
992 return 0;
993
994 ret = rq->engine->emit_flush(rq, EMIT_BARRIER);
995 if (ret)
996 return ret;
997
998 cs = intel_ring_begin(rq, (wal->count * 2 + 2));
999 if (IS_ERR(cs))
1000 return PTR_ERR(cs);
1001
1002 fw = wal_get_fw_for_rmw(uncore, wal);
1003
1004 intel_gt_mcr_lock(wal->gt, &flags);
1005 spin_lock(&uncore->lock);
1006 intel_uncore_forcewake_get__locked(uncore, fw);
1007
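/*
 * Emit a single MI_LOAD_REGISTER_IMM with one (offset, value) pair per
 * workaround; non-masked registers get a CPU read-modify-write here so the
 * emitted value preserves unrelated bits.
 */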
1008 *cs++ = MI_LOAD_REGISTER_IMM(wal->count);
1009 for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
1010 u32 val;
1011
1012 /* Skip reading the register if it's not really needed */
1013 if (wa->masked_reg || (wa->clr | wa->set) == U32_MAX) {
1014 val = wa->set;
1015 } else {
1016 val = wa->is_mcr ?
1017 intel_gt_mcr_read_any_fw(wal->gt, wa->mcr_reg) :
1018 intel_uncore_read_fw(uncore, wa->reg);
1019 val &= ~wa->clr;
1020 val |= wa->set;
1021 }
1022
1023 *cs++ = i915_mmio_reg_offset(wa->reg);
1024 *cs++ = val;
1025 }
1026 *cs++ = MI_NOOP;
1027
1028 intel_uncore_forcewake_put__locked(uncore, fw);
1029 spin_unlock(&uncore->lock);
1030 intel_gt_mcr_unlock(wal->gt, flags);
1031
1032 intel_ring_advance(rq, cs);
1033
1034 ret = rq->engine->emit_flush(rq, EMIT_BARRIER);
1035 if (ret)
1036 return ret;
1037
1038 return 0;
1039 }
1040
1041 static void
1042 gen4_gt_workarounds_init(struct intel_gt *gt,
1043 struct i915_wa_list *wal)
1044 {
1045 /* WaDisable_RenderCache_OperationalFlush:gen4,ilk */
1046 wa_masked_dis(wal, CACHE_MODE_0, RC_OP_FLUSH_ENABLE);
1047 }
1048
1049 static void
1050 g4x_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
1051 {
1052 gen4_gt_workarounds_init(gt, wal);
1053
1054 /* WaDisableRenderCachePipelinedFlush:g4x,ilk */
1055 wa_masked_en(wal, CACHE_MODE_0, CM0_PIPELINED_RENDER_FLUSH_DISABLE);
1056 }
1057
1058 static void
1059 ilk_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
1060 {
1061 g4x_gt_workarounds_init(gt, wal);
1062
1063 wa_masked_en(wal, _3D_CHICKEN2, _3D_CHICKEN2_WM_READ_PIPELINED);
1064 }
1065
1066 static void
1067 snb_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
1068 {
1069 }
1070
1071 static void
1072 ivb_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
1073 {
1074 /* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
1075 wa_masked_dis(wal,
1076 GEN7_COMMON_SLICE_CHICKEN1,
1077 GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
1078
1079 /* WaApplyL3ControlAndL3ChickenMode:ivb */
1080 wa_write(wal, GEN7_L3CNTLREG1, GEN7_WA_FOR_GEN7_L3_CONTROL);
1081 wa_write(wal, GEN7_L3_CHICKEN_MODE_REGISTER, GEN7_WA_L3_CHICKEN_MODE);
1082
1083 /* WaForceL3Serialization:ivb */
1084 wa_write_clr(wal, GEN7_L3SQCREG4, L3SQ_URB_READ_CAM_MATCH_DISABLE);
1085 }
1086
1087 static void
1088 vlv_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
1089 {
1090 /* WaForceL3Serialization:vlv */
1091 wa_write_clr(wal, GEN7_L3SQCREG4, L3SQ_URB_READ_CAM_MATCH_DISABLE);
1092
1093 /*
1094 * WaIncreaseL3CreditsForVLVB0:vlv
1095 * This is the hardware default actually.
1096 */
1097 wa_write(wal, GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE);
1098 }
1099
1100 static void
1101 hsw_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
1102 {
1103 /* L3 caching of data atomics doesn't work -- disable it. */
1104 wa_write(wal, HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
1105
1106 wa_add(wal,
1107 HSW_ROW_CHICKEN3, 0,
1108 _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE),
1109 0 /* XXX does this reg exist? */, true);
1110
1111 /* WaVSRefCountFullforceMissDisable:hsw */
1112 wa_write_clr(wal, GEN7_FF_THREAD_MODE, GEN7_FF_VS_REF_CNT_FFME);
1113 }
1114
1115 static void
1116 gen9_wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal)
1117 {
1118 const struct sseu_dev_info *sseu = &to_gt(i915)->info.sseu;
1119 unsigned int slice, subslice;
1120 u32 mcr, mcr_mask;
1121
1122 GEM_BUG_ON(GRAPHICS_VER(i915) != 9);
1123
1124 /*
1125 * WaProgramMgsrForCorrectSliceSpecificMmioReads:gen9,glk,kbl,cml
1126 * Before any MMIO read into slice/subslice specific registers, MCR
1127 * packet control register needs to be programmed to point to any
1128 * enabled s/ss pair. Otherwise, incorrect values will be returned.
1129 * This means each subsequent MMIO read will be forwarded to a
1130 * specific s/ss combination, but this is OK since these registers
1131 * are consistent across s/ss in almost all cases. On the rare
1132 * occasions, such as INSTDONE, where this value is dependent
1133 * on the s/ss combo, the read should be done with read_subslice_reg.
1134 */
1135 slice = ffs(sseu->slice_mask) - 1;
1136 GEM_BUG_ON(slice >= ARRAY_SIZE(sseu->subslice_mask.hsw));
1137 subslice = ffs(intel_sseu_get_hsw_subslices(sseu, slice));
1138 GEM_BUG_ON(!subslice);
1139 subslice--;
1140
1141 /*
1142 * We use GEN8_MCR..() macros to calculate the |mcr| value for
1143 * Gen9 to address WaProgramMgsrForCorrectSliceSpecificMmioReads
1144 */
1145 mcr = GEN8_MCR_SLICE(slice) | GEN8_MCR_SUBSLICE(subslice);
1146 mcr_mask = GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK;
1147
1148 drm_dbg(&i915->drm, "MCR slice:%d/subslice:%d = %x\n", slice, subslice, mcr);
1149
1150 wa_write_clr_set(wal, GEN8_MCR_SELECTOR, mcr_mask, mcr);
1151 }
1152
1153 static void
1154 gen9_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
1155 {
1156 struct drm_i915_private *i915 = gt->i915;
1157
1158 /* WaProgramMgsrForCorrectSliceSpecificMmioReads:glk,kbl,cml,gen9 */
1159 gen9_wa_init_mcr(i915, wal);
1160
1161 /* WaDisableKillLogic:bxt,skl,kbl */
1162 if (!IS_COFFEELAKE(i915) && !IS_COMETLAKE(i915))
1163 wa_write_or(wal,
1164 GAM_ECOCHK,
1165 ECOCHK_DIS_TLB);
1166
1167 if (HAS_LLC(i915)) {
1168 /* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl
1169 *
1170 * Must match Display Engine. See
1171 * WaCompressedResourceDisplayNewHashMode.
1172 */
1173 wa_write_or(wal,
1174 MMCD_MISC_CTRL,
1175 MMCD_PCLA | MMCD_HOTSPOT_EN);
1176 }
1177
1178 /* WaDisableHDCInvalidation:skl,bxt,kbl,cfl */
1179 wa_write_or(wal,
1180 GAM_ECOCHK,
1181 BDW_DISABLE_HDC_INVALIDATION);
1182 }
1183
1184 static void
1185 skl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
1186 {
1187 gen9_gt_workarounds_init(gt, wal);
1188
1189 /* WaDisableGafsUnitClkGating:skl */
1190 wa_write_or(wal,
1191 GEN7_UCGCTL4,
1192 GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
1193
1194 /* WaInPlaceDecompressionHang:skl */
1195 if (IS_SKYLAKE(gt->i915) && IS_GRAPHICS_STEP(gt->i915, STEP_A0, STEP_H0))
1196 wa_write_or(wal,
1197 GEN9_GAMT_ECO_REG_RW_IA,
1198 GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
1199 }
1200
1201 static void
1202 kbl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
1203 {
1204 gen9_gt_workarounds_init(gt, wal);
1205
1206 /* WaDisableDynamicCreditSharing:kbl */
1207 if (IS_KABYLAKE(gt->i915) && IS_GRAPHICS_STEP(gt->i915, 0, STEP_C0))
1208 wa_write_or(wal,
1209 GAMT_CHKN_BIT_REG,
1210 GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);
1211
1212 /* WaDisableGafsUnitClkGating:kbl */
1213 wa_write_or(wal,
1214 GEN7_UCGCTL4,
1215 GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
1216
1217 /* WaInPlaceDecompressionHang:kbl */
1218 wa_write_or(wal,
1219 GEN9_GAMT_ECO_REG_RW_IA,
1220 GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
1221 }
1222
1223 static void
1224 glk_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
1225 {
1226 gen9_gt_workarounds_init(gt, wal);
1227 }
1228
1229 static void
1230 cfl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
1231 {
1232 gen9_gt_workarounds_init(gt, wal);
1233
1234 /* WaDisableGafsUnitClkGating:cfl */
1235 wa_write_or(wal,
1236 GEN7_UCGCTL4,
1237 GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
1238
1239 /* WaInPlaceDecompressionHang:cfl */
1240 wa_write_or(wal,
1241 GEN9_GAMT_ECO_REG_RW_IA,
1242 GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
1243 }
1244
1245 static void __set_mcr_steering(struct i915_wa_list *wal,
1246 i915_reg_t steering_reg,
1247 unsigned int slice, unsigned int subslice)
1248 {
1249 u32 mcr, mcr_mask;
1250
1251 mcr = GEN11_MCR_SLICE(slice) | GEN11_MCR_SUBSLICE(subslice);
1252 mcr_mask = GEN11_MCR_SLICE_MASK | GEN11_MCR_SUBSLICE_MASK;
1253
1254 wa_write_clr_set(wal, steering_reg, mcr_mask, mcr);
1255 }
1256
1257 static void debug_dump_steering(struct intel_gt *gt)
1258 {
1259 struct drm_printer p = drm_debug_printer("MCR Steering:");
1260
1261 if (drm_debug_enabled(DRM_UT_DRIVER))
1262 intel_gt_mcr_report_steering(&p, gt, false);
1263 }
1264
1265 static void __add_mcr_wa(struct intel_gt *gt, struct i915_wa_list *wal,
1266 unsigned int slice, unsigned int subslice)
1267 {
1268 __set_mcr_steering(wal, GEN8_MCR_SELECTOR, slice, subslice);
1269
1270 gt->default_steering.groupid = slice;
1271 gt->default_steering.instanceid = subslice;
1272
1273 debug_dump_steering(gt);
1274 }
1275
1276 static void
1277 icl_wa_init_mcr(struct intel_gt *gt, struct i915_wa_list *wal)
1278 {
1279 const struct sseu_dev_info *sseu = &gt->info.sseu;
1280 unsigned int subslice;
1281
1282 GEM_BUG_ON(GRAPHICS_VER(gt->i915) < 11);
1283 GEM_BUG_ON(hweight8(sseu->slice_mask) > 1);
1284
1285 /*
1286 * Although a platform may have subslices, we need to always steer
1287 * reads to the lowest instance that isn't fused off. When Render
1288 * Power Gating is enabled, grabbing forcewake will only power up a
1289 * single subslice (the "minconfig") if there isn't a real workload
1290 * that needs to be run; this means that if we steer register reads to
1291 * one of the higher subslices, we run the risk of reading back 0's or
1292 * random garbage.
1293 */
1294 subslice = __ffs(intel_sseu_get_hsw_subslices(sseu, 0));
1295
1296 /*
1297 * If the subslice we picked above also steers us to a valid L3 bank,
1298 * then we can just rely on the default steering and won't need to
1299 * worry about explicitly re-steering L3BANK reads later.
1300 */
1301 if (gt->info.l3bank_mask & BIT(subslice))
1302 gt->steering_table[L3BANK] = NULL;
1303
1304 __add_mcr_wa(gt, wal, 0, subslice);
1305 }
1306
1307 static void
1308 xehp_init_mcr(struct intel_gt *gt, struct i915_wa_list *wal)
1309 {
1310 const struct sseu_dev_info *sseu = &gt->info.sseu;
1311 unsigned long slice, subslice = 0, slice_mask = 0;
1312 u32 lncf_mask = 0;
1313 int i;
1314
1315 /*
1316 * On Xe_HP the steering increases in complexity. There are now several
1317 * more units that require steering and we're not guaranteed to be able
1318 * to find a common setting for all of them. These are:
1319 * - GSLICE (fusable)
1320 * - DSS (sub-unit within gslice; fusable)
1321 * - L3 Bank (fusable)
1322 * - MSLICE (fusable)
1323 * - LNCF (sub-unit within mslice; always present if mslice is present)
1324 *
1325 * We'll do our default/implicit steering based on GSLICE (in the
1326 * sliceid field) and DSS (in the subsliceid field). If we can
1327 * find overlap between the valid MSLICE and/or LNCF values with
1328 * a suitable GSLICE, then we can just re-use the default value and
1329 * skip any explicit steering at runtime.
1330 *
1331 * We only need to look for overlap between GSLICE/MSLICE/LNCF to find
1332 * a valid sliceid value. DSS steering is the only type of steering
1333 * that utilizes the 'subsliceid' bits.
1334 *
1335 * Also note that, even though the steering domain is called "GSlice"
1336 * and it is encoded in the register using the gslice format, the spec
1337 * says that the combined (geometry | compute) fuse should be used to
1338 * select the steering.
1339 */
1340
1341 /* Find the potential gslice candidates */
1342 slice_mask = intel_slicemask_from_xehp_dssmask(sseu->subslice_mask,
1343 GEN_DSS_PER_GSLICE);
1344
1345 /*
1346 * Find the potential LNCF candidates. Either LNCF within a valid
1347 * mslice is fine.
1348 */
1349 for_each_set_bit(i, &gt->info.mslice_mask, GEN12_MAX_MSLICES)
1350 lncf_mask |= (0x3 << (i * 2));
1351
1352 /*
1353 * Are there any sliceid values that work for both GSLICE and LNCF
1354 * steering?
1355 */
1356 if (slice_mask & lncf_mask) {
1357 slice_mask &= lncf_mask;
1358 gt->steering_table[LNCF] = NULL;
1359 }
1360
1361 /* How about sliceid values that also work for MSLICE steering? */
1362 if (slice_mask & gt->info.mslice_mask) {
1363 slice_mask &= gt->info.mslice_mask;
1364 gt->steering_table[MSLICE] = NULL;
1365 }
1366
1367 if (IS_XEHPSDV(gt->i915) && slice_mask & BIT(0))
1368 gt->steering_table[GAM] = NULL;
1369
1370 slice = __ffs(slice_mask);
1371 subslice = intel_sseu_find_first_xehp_dss(sseu, GEN_DSS_PER_GSLICE, slice) %
1372 GEN_DSS_PER_GSLICE;
1373
1374 __add_mcr_wa(gt, wal, slice, subslice);
1375
1376 /*
1377 * SQIDI ranges are special because they use different steering
1378 * registers than everything else we work with. On XeHP SDV and
1379 * DG2-G10, any value in the steering registers will work fine since
1380 * all instances are present, but DG2-G11 only has SQIDI instances at
1381 * ID's 2 and 3, so we need to steer to one of those. For simplicity
1382 * we'll just steer to a hardcoded "2" since that value will work
1383 * everywhere.
1384 */
1385 __set_mcr_steering(wal, MCFG_MCR_SELECTOR, 0, 2);
1386 __set_mcr_steering(wal, SF_MCR_SELECTOR, 0, 2);
1387
1388 /*
1389 * On DG2, GAM registers have a dedicated steering control register
1390 * and must always be programmed to a hardcoded groupid of "1."
1391 */
1392 if (IS_DG2(gt->i915))
1393 __set_mcr_steering(wal, GAM_MCR_SELECTOR, 1, 0);
1394 }
1395
1396 static void
1397 pvc_init_mcr(struct intel_gt *gt, struct i915_wa_list *wal)
1398 {
1399 unsigned int dss;
1400
1401 /*
1402 * Setup implicit steering for COMPUTE and DSS ranges to the first
1403 * non-fused-off DSS. All other types of MCR registers will be
1404 * explicitly steered.
1405 */
1406 dss = intel_sseu_find_first_xehp_dss(&gt->info.sseu, 0, 0);
1407 __add_mcr_wa(gt, wal, dss / GEN_DSS_PER_CSLICE, dss % GEN_DSS_PER_CSLICE);
1408 }
1409
1410 static void
1411 icl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
1412 {
1413 struct drm_i915_private *i915 = gt->i915;
1414
1415 icl_wa_init_mcr(gt, wal);
1416
1417 /* WaModifyGamTlbPartitioning:icl */
1418 wa_write_clr_set(wal,
1419 GEN11_GACB_PERF_CTRL,
1420 GEN11_HASH_CTRL_MASK,
1421 GEN11_HASH_CTRL_BIT0 | GEN11_HASH_CTRL_BIT4);
1422
1423 /* Wa_1405766107:icl
1424 * Formerly known as WaCL2SFHalfMaxAlloc
1425 */
1426 wa_write_or(wal,
1427 GEN11_LSN_UNSLCVC,
1428 GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC |
1429 GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC);
1430
1431 /* Wa_220166154:icl
1432 * Formerly known as WaDisCtxReload
1433 */
1434 wa_write_or(wal,
1435 GEN8_GAMW_ECO_DEV_RW_IA,
1436 GAMW_ECO_DEV_CTX_RELOAD_DISABLE);
1437
1438 /* Wa_1406463099:icl
1439 * Formerly known as WaGamTlbPendError
1440 */
1441 wa_write_or(wal,
1442 GAMT_CHKN_BIT_REG,
1443 GAMT_CHKN_DISABLE_L3_COH_PIPE);
1444
1445 /*
1446 * Wa_1408615072:icl,ehl (vsunit)
1447 * Wa_1407596294:icl,ehl (hsunit)
1448 */
1449 wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE,
1450 VSUNIT_CLKGATE_DIS | HSUNIT_CLKGATE_DIS);
1451
1452 /* Wa_1407352427:icl,ehl */
1453 wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
1454 PSDUNIT_CLKGATE_DIS);
1455
1456 /* Wa_1406680159:icl,ehl */
1457 wa_mcr_write_or(wal,
1458 GEN11_SUBSLICE_UNIT_LEVEL_CLKGATE,
1459 GWUNIT_CLKGATE_DIS);
1460
1461 /* Wa_1607087056:icl,ehl,jsl */
1462 if (IS_ICELAKE(i915) ||
1463 ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
1464 IS_GRAPHICS_STEP(i915, STEP_A0, STEP_B0)))
1465 wa_write_or(wal,
1466 GEN11_SLICE_UNIT_LEVEL_CLKGATE,
1467 L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS);
1468
1469 /*
1470 * This is not a documented workaround, but rather an optimization
1471 * to reduce sampler power.
1472 */
1473 wa_mcr_write_clr(wal, GEN10_DFR_RATIO_EN_AND_CHICKEN, DFR_DISABLE);
1474 }
1475
1476 /*
1477 * Though there are per-engine instances of these registers,
1478 * they retain their value through engine resets and should
1479 * only be provided on the GT workaround list rather than
1480 * the engine-specific workaround list.
1481 */
1482 static void
1483 wa_14011060649(struct intel_gt *gt, struct i915_wa_list *wal)
1484 {
1485 struct intel_engine_cs *engine;
1486 int id;
1487
1488 for_each_engine(engine, gt, id) {
1489 if (engine->class != VIDEO_DECODE_CLASS ||
1490 (engine->instance % 2))
1491 continue;
1492
1493 wa_write_or(wal, VDBOX_CGCTL3F10(engine->mmio_base),
1494 IECPUNIT_CLKGATE_DIS);
1495 }
1496 }
1497
1498 static void
1499 gen12_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
1500 {
1501 icl_wa_init_mcr(gt, wal);
1502
1503 /* Wa_14011060649:tgl,rkl,dg1,adl-s,adl-p */
1504 wa_14011060649(gt, wal);
1505
1506 /* Wa_14011059788:tgl,rkl,adl-s,dg1,adl-p */
1507 wa_mcr_write_or(wal, GEN10_DFR_RATIO_EN_AND_CHICKEN, DFR_DISABLE);
1508
1509 /*
1510 * Wa_14015795083
1511 *
1512 * Firmware on some gen12 platforms locks the MISCCPCTL register,
1513 * preventing i915 from modifying it for this workaround. Skip the
1514 * readback verification for this workaround on debug builds; if the
1515 * workaround doesn't stick due to firmware behavior, it's not an error
1516 * that we want CI to flag.
1517 */
1518 wa_add(wal, GEN7_MISCCPCTL, GEN12_DOP_CLOCK_GATE_RENDER_ENABLE,
1519 0, 0, false);
1520 }
1521
1522 static void
1523 dg1_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
1524 {
1525 gen12_gt_workarounds_init(gt, wal);
1526
1527 /* Wa_1409420604:dg1 */
1528 wa_mcr_write_or(wal, SUBSLICE_UNIT_LEVEL_CLKGATE2,
1529 CPSSUNIT_CLKGATE_DIS);
1530
1531 /* Wa_1408615072:dg1 */
1532 /* Empirical testing shows this register is unaffected by engine reset. */
1533 wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2, VSUNIT_CLKGATE_DIS_TGL);
1534 }
1535
1536 static void
1537 xehpsdv_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
1538 {
1539 struct drm_i915_private *i915 = gt->i915;
1540
1541 xehp_init_mcr(gt, wal);
1542
1543 /* Wa_1409757795:xehpsdv */
1544 wa_mcr_write_or(wal, SCCGCTL94DC, CG3DDISURB);
1545
1546 /* Wa_18011725039:xehpsdv */
1547 if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_A1, STEP_B0)) {
1548 wa_mcr_masked_dis(wal, MLTICTXCTL, TDONRENDER);
1549 wa_mcr_write_or(wal, L3SQCREG1_CCS0, FLUSHALLNONCOH);
1550 }
1551
1552 /* Wa_16011155590:xehpsdv */
1553 if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_A0, STEP_B0))
1554 wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE,
1555 TSGUNIT_CLKGATE_DIS);
1556
1557 /* Wa_14011780169:xehpsdv */
1558 if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_B0, STEP_FOREVER)) {
1559 wa_write_or(wal, UNSLCGCTL9440, GAMTLBOACS_CLKGATE_DIS |
1560 GAMTLBVDBOX7_CLKGATE_DIS |
1561 GAMTLBVDBOX6_CLKGATE_DIS |
1562 GAMTLBVDBOX5_CLKGATE_DIS |
1563 GAMTLBVDBOX4_CLKGATE_DIS |
1564 GAMTLBVDBOX3_CLKGATE_DIS |
1565 GAMTLBVDBOX2_CLKGATE_DIS |
1566 GAMTLBVDBOX1_CLKGATE_DIS |
1567 GAMTLBVDBOX0_CLKGATE_DIS |
1568 GAMTLBKCR_CLKGATE_DIS |
1569 GAMTLBGUC_CLKGATE_DIS |
1570 GAMTLBBLT_CLKGATE_DIS);
1571 wa_write_or(wal, UNSLCGCTL9444, GAMTLBGFXA0_CLKGATE_DIS |
1572 GAMTLBGFXA1_CLKGATE_DIS |
1573 GAMTLBCOMPA0_CLKGATE_DIS |
1574 GAMTLBCOMPA1_CLKGATE_DIS |
1575 GAMTLBCOMPB0_CLKGATE_DIS |
1576 GAMTLBCOMPB1_CLKGATE_DIS |
1577 GAMTLBCOMPC0_CLKGATE_DIS |
1578 GAMTLBCOMPC1_CLKGATE_DIS |
1579 GAMTLBCOMPD0_CLKGATE_DIS |
1580 GAMTLBCOMPD1_CLKGATE_DIS |
1581 GAMTLBMERT_CLKGATE_DIS |
1582 GAMTLBVEBOX3_CLKGATE_DIS |
1583 GAMTLBVEBOX2_CLKGATE_DIS |
1584 GAMTLBVEBOX1_CLKGATE_DIS |
1585 GAMTLBVEBOX0_CLKGATE_DIS);
1586 }
1587
1588 /* Wa_16012725990:xehpsdv */
1589 if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_A1, STEP_FOREVER))
1590 wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE, VFUNIT_CLKGATE_DIS);
1591
1592 /* Wa_14011060649:xehpsdv */
1593 wa_14011060649(gt, wal);
1594
1595 /* Wa_14012362059:xehpsdv */
1596 wa_mcr_write_or(wal, XEHP_MERT_MOD_CTRL, FORCE_MISS_FTLB);
1597
1598 /* Wa_14014368820:xehpsdv */
1599 wa_mcr_write_or(wal, XEHP_GAMCNTRL_CTRL,
1600 INVALIDATION_BROADCAST_MODE_DIS | GLOBAL_INVALIDATION_MODE);
1601
1602 /* Wa_14010670810:xehpsdv */
1603 wa_mcr_write_or(wal, XEHP_L3NODEARBCFG, XEHP_LNESPARE);
1604 }
1605
1606 static void
1607 dg2_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
1608 {
1609 struct intel_engine_cs *engine;
1610 int id;
1611
1612 xehp_init_mcr(gt, wal);
1613
1614 /* Wa_14011060649:dg2 */
1615 wa_14011060649(gt, wal);
1616
1617 /*
1618 * Although there are per-engine instances of these registers,
1619 * they technically exist outside the engine itself and are not
1620 * impacted by engine resets. Furthermore, they're part of the
1621 * GuC blacklist so trying to treat them as engine workarounds
1622 * will result in GuC initialization failure and a wedged GPU.
1623 */
1624 for_each_engine(engine, gt, id) {
1625 if (engine->class != VIDEO_DECODE_CLASS)
1626 continue;
1627
1628 /* Wa_16010515920:dg2_g10 */
1629 if (IS_DG2_GRAPHICS_STEP(gt->i915, G10, STEP_A0, STEP_B0))
1630 wa_write_or(wal, VDBOX_CGCTL3F18(engine->mmio_base),
1631 ALNUNIT_CLKGATE_DIS);
1632 }
1633
1634 if (IS_DG2_G10(gt->i915)) {
1635 /* Wa_22010523718:dg2 */
1636 wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE,
1637 CG3DDISCFEG_CLKGATE_DIS);
1638
1639 /* Wa_14011006942:dg2 */
1640 wa_mcr_write_or(wal, GEN11_SUBSLICE_UNIT_LEVEL_CLKGATE,
1641 DSS_ROUTER_CLKGATE_DIS);
1642 }
1643
1644 if (IS_DG2_GRAPHICS_STEP(gt->i915, G10, STEP_A0, STEP_B0) ||
1645 IS_DG2_GRAPHICS_STEP(gt->i915, G11, STEP_A0, STEP_B0)) {
1646 /* Wa_14012362059:dg2 */
1647 wa_mcr_write_or(wal, XEHP_MERT_MOD_CTRL, FORCE_MISS_FTLB);
1648 }
1649
1650 if (IS_DG2_GRAPHICS_STEP(gt->i915, G10, STEP_A0, STEP_B0)) {
1651 /* Wa_14010948348:dg2_g10 */
1652 wa_write_or(wal, UNSLCGCTL9430, MSQDUNIT_CLKGATE_DIS);
1653
1654 /* Wa_14011037102:dg2_g10 */
1655 wa_write_or(wal, UNSLCGCTL9444, LTCDD_CLKGATE_DIS);
1656
1657 /* Wa_14011371254:dg2_g10 */
1658 wa_mcr_write_or(wal, XEHP_SLICE_UNIT_LEVEL_CLKGATE, NODEDSS_CLKGATE_DIS);
1659
1660 /* Wa_14011431319:dg2_g10 */
1661 wa_write_or(wal, UNSLCGCTL9440, GAMTLBOACS_CLKGATE_DIS |
1662 GAMTLBVDBOX7_CLKGATE_DIS |
1663 GAMTLBVDBOX6_CLKGATE_DIS |
1664 GAMTLBVDBOX5_CLKGATE_DIS |
1665 GAMTLBVDBOX4_CLKGATE_DIS |
1666 GAMTLBVDBOX3_CLKGATE_DIS |
1667 GAMTLBVDBOX2_CLKGATE_DIS |
1668 GAMTLBVDBOX1_CLKGATE_DIS |
1669 GAMTLBVDBOX0_CLKGATE_DIS |
1670 GAMTLBKCR_CLKGATE_DIS |
1671 GAMTLBGUC_CLKGATE_DIS |
1672 GAMTLBBLT_CLKGATE_DIS);
1673 wa_write_or(wal, UNSLCGCTL9444, GAMTLBGFXA0_CLKGATE_DIS |
1674 GAMTLBGFXA1_CLKGATE_DIS |
1675 GAMTLBCOMPA0_CLKGATE_DIS |
1676 GAMTLBCOMPA1_CLKGATE_DIS |
1677 GAMTLBCOMPB0_CLKGATE_DIS |
1678 GAMTLBCOMPB1_CLKGATE_DIS |
1679 GAMTLBCOMPC0_CLKGATE_DIS |
1680 GAMTLBCOMPC1_CLKGATE_DIS |
1681 GAMTLBCOMPD0_CLKGATE_DIS |
1682 GAMTLBCOMPD1_CLKGATE_DIS |
1683 GAMTLBMERT_CLKGATE_DIS |
1684 GAMTLBVEBOX3_CLKGATE_DIS |
1685 GAMTLBVEBOX2_CLKGATE_DIS |
1686 GAMTLBVEBOX1_CLKGATE_DIS |
1687 GAMTLBVEBOX0_CLKGATE_DIS);
1688
1689 /* Wa_14010569222:dg2_g10 */
1690 wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE,
1691 GAMEDIA_CLKGATE_DIS);
1692
1693 /* Wa_14011028019:dg2_g10 */
1694 wa_mcr_write_or(wal, SSMCGCTL9530, RTFUNIT_CLKGATE_DIS);
1695
1696 /* Wa_14010680813:dg2_g10 */
1697 wa_mcr_write_or(wal, XEHP_GAMSTLB_CTRL,
1698 CONTROL_BLOCK_CLKGATE_DIS |
1699 EGRESS_BLOCK_CLKGATE_DIS |
1700 TAG_BLOCK_CLKGATE_DIS);
1701 }
1702
1703 /* Wa_14014830051:dg2 */
1704 wa_mcr_write_clr(wal, SARB_CHICKEN1, COMP_CKN_IN);
1705
1706 /* Wa_14015795083 */
1707 wa_write_clr(wal, GEN7_MISCCPCTL, GEN12_DOP_CLOCK_GATE_RENDER_ENABLE);
1708
1709 /* Wa_18018781329 */
1710 wa_mcr_write_or(wal, RENDER_MOD_CTRL, FORCE_MISS_FTLB);
1711 wa_mcr_write_or(wal, COMP_MOD_CTRL, FORCE_MISS_FTLB);
1712 wa_mcr_write_or(wal, XEHP_VDBX_MOD_CTRL, FORCE_MISS_FTLB);
1713 wa_mcr_write_or(wal, XEHP_VEBX_MOD_CTRL, FORCE_MISS_FTLB);
1714
1715 /* Wa_1509235366:dg2 */
1716 wa_mcr_write_or(wal, XEHP_GAMCNTRL_CTRL,
1717 INVALIDATION_BROADCAST_MODE_DIS | GLOBAL_INVALIDATION_MODE);
1718
1719 /* Wa_14010648519:dg2 */
1720 wa_mcr_write_or(wal, XEHP_L3NODEARBCFG, XEHP_LNESPARE);
1721 }
1722
1723 static void
1724 pvc_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
1725 {
1726 pvc_init_mcr(gt, wal);
1727
1728 /* Wa_14015795083 */
1729 wa_write_clr(wal, GEN7_MISCCPCTL, GEN12_DOP_CLOCK_GATE_RENDER_ENABLE);
1730
1731 /* Wa_18018781329 */
1732 wa_mcr_write_or(wal, RENDER_MOD_CTRL, FORCE_MISS_FTLB);
1733 wa_mcr_write_or(wal, COMP_MOD_CTRL, FORCE_MISS_FTLB);
1734 wa_mcr_write_or(wal, XEHP_VDBX_MOD_CTRL, FORCE_MISS_FTLB);
1735 wa_mcr_write_or(wal, XEHP_VEBX_MOD_CTRL, FORCE_MISS_FTLB);
1736
1737 /* Wa_16016694945 */
1738 wa_mcr_masked_en(wal, XEHPC_LNCFMISCCFGREG0, XEHPC_OVRLSCCC);
1739 }
1740
1741 static void
1742 xelpg_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
1743 {
1744 /* Wa_14018778641 / Wa_18018781329 */
1745 wa_mcr_write_or(wal, COMP_MOD_CTRL, FORCE_MISS_FTLB);
1746
1747 /* Wa_22016670082 */
1748 wa_write_or(wal, GEN12_SQCNT1, GEN12_STRICT_RAR_ENABLE);
1749
1750 if (IS_MTL_GRAPHICS_STEP(gt->i915, M, STEP_A0, STEP_B0) ||
1751 IS_MTL_GRAPHICS_STEP(gt->i915, P, STEP_A0, STEP_B0)) {
1752 /* Wa_14014830051 */
1753 wa_mcr_write_clr(wal, SARB_CHICKEN1, COMP_CKN_IN);
1754
1755 /* Wa_14015795083 */
1756 wa_write_clr(wal, GEN7_MISCCPCTL, GEN12_DOP_CLOCK_GATE_RENDER_ENABLE);
1757 }
1758
1759 /*
1760 * Unlike older platforms, we no longer setup implicit steering here;
1761 * all MCR accesses are explicitly steered.
1762 */
1763 debug_dump_steering(gt);
1764 }
1765
1766 static void
1767 xelpmp_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
1768 {
1769 /*
1770 * Wa_14018778641
1771 * Wa_18018781329
1772 *
1773 * Note that although these registers are MCR on the primary
1774 * GT, the media GT's versions are regular singleton registers.
1775 */
1776 wa_write_or(wal, XELPMP_GSC_MOD_CTRL, FORCE_MISS_FTLB);
1777
1778 debug_dump_steering(gt);
1779 }
1780
1781 /*
1782 * The bspec performance guide has recommended MMIO tuning settings. These
1783 * aren't truly "workarounds" but we want to program them through the
1784 * workaround infrastructure to make sure they're (re)applied at the proper
1785 * times.
1786 *
1787 * The programming in this function is for settings that persist through
1788 * engine resets and also are not part of any engine's register state context.
1789 * I.e., settings that only need to be re-applied in the event of a full GT
1790 * reset.
1791 */
1792 static void gt_tuning_settings(struct intel_gt *gt, struct i915_wa_list *wal)
1793 {
1794 if (IS_METEORLAKE(gt->i915)) {
1795 if (gt->type != GT_MEDIA)
1796 wa_mcr_write_or(wal, XEHP_L3SCQREG7, BLEND_FILL_CACHING_OPT_DIS);
1797
1798 wa_mcr_write_or(wal, XEHP_SQCM, EN_32B_ACCESS);
1799 }
1800
1801 if (IS_PONTEVECCHIO(gt->i915)) {
1802 wa_mcr_write(wal, XEHPC_L3SCRUB,
1803 SCRUB_CL_DWNGRADE_SHARED | SCRUB_RATE_4B_PER_CLK);
1804 wa_mcr_masked_en(wal, XEHPC_LNCFMISCCFGREG0, XEHPC_HOSTCACHEEN);
1805 }
1806
1807 if (IS_DG2(gt->i915)) {
1808 wa_mcr_write_or(wal, XEHP_L3SCQREG7, BLEND_FILL_CACHING_OPT_DIS);
1809 wa_mcr_write_or(wal, XEHP_SQCM, EN_32B_ACCESS);
1810 }
1811 }
1812
1813 static void
1814 gt_init_workarounds(struct intel_gt *gt, struct i915_wa_list *wal)
1815 {
1816 struct drm_i915_private *i915 = gt->i915;
1817
1818 gt_tuning_settings(gt, wal);
1819
1820 if (gt->type == GT_MEDIA) {
1821 if (MEDIA_VER(i915) >= 13)
1822 xelpmp_gt_workarounds_init(gt, wal);
1823 else
1824 MISSING_CASE(MEDIA_VER(i915));
1825
1826 return;
1827 }
1828
1829 if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70))
1830 xelpg_gt_workarounds_init(gt, wal);
1831 else if (IS_PONTEVECCHIO(i915))
1832 pvc_gt_workarounds_init(gt, wal);
1833 else if (IS_DG2(i915))
1834 dg2_gt_workarounds_init(gt, wal);
1835 else if (IS_XEHPSDV(i915))
1836 xehpsdv_gt_workarounds_init(gt, wal);
1837 else if (IS_DG1(i915))
1838 dg1_gt_workarounds_init(gt, wal);
1839 else if (GRAPHICS_VER(i915) == 12)
1840 gen12_gt_workarounds_init(gt, wal);
1841 else if (GRAPHICS_VER(i915) == 11)
1842 icl_gt_workarounds_init(gt, wal);
1843 else if (IS_COFFEELAKE(i915) || IS_COMETLAKE(i915))
1844 cfl_gt_workarounds_init(gt, wal);
1845 else if (IS_GEMINILAKE(i915))
1846 glk_gt_workarounds_init(gt, wal);
1847 else if (IS_KABYLAKE(i915))
1848 kbl_gt_workarounds_init(gt, wal);
1849 else if (IS_BROXTON(i915))
1850 gen9_gt_workarounds_init(gt, wal);
1851 else if (IS_SKYLAKE(i915))
1852 skl_gt_workarounds_init(gt, wal);
1853 else if (IS_HASWELL(i915))
1854 hsw_gt_workarounds_init(gt, wal);
1855 else if (IS_VALLEYVIEW(i915))
1856 vlv_gt_workarounds_init(gt, wal);
1857 else if (IS_IVYBRIDGE(i915))
1858 ivb_gt_workarounds_init(gt, wal);
1859 else if (GRAPHICS_VER(i915) == 6)
1860 snb_gt_workarounds_init(gt, wal);
1861 else if (GRAPHICS_VER(i915) == 5)
1862 ilk_gt_workarounds_init(gt, wal);
1863 else if (IS_G4X(i915))
1864 g4x_gt_workarounds_init(gt, wal);
1865 else if (GRAPHICS_VER(i915) == 4)
1866 gen4_gt_workarounds_init(gt, wal);
1867 else if (GRAPHICS_VER(i915) <= 8)
1868 ;
1869 else
1870 MISSING_CASE(GRAPHICS_VER(i915));
1871 }
1872
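/*
 * Build the global "GT" workaround list for @gt. The list is applied by
 * intel_gt_apply_workarounds() and checked by intel_gt_verify_workarounds().
 */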
1873 void intel_gt_init_workarounds(struct intel_gt *gt)
1874 {
1875 struct i915_wa_list *wal = &gt->wa_list;
1876
1877 wa_init_start(wal, gt, "GT", "global");
1878 gt_init_workarounds(gt, wal);
1879 wa_init_finish(wal);
1880 }
1881
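/*
 * Check a single workaround: compare the current register value against the
 * expected one, looking only at the bits covered by wa->read, and report an
 * error if the workaround has been lost.
 */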
1882 static bool
1883 wa_verify(struct intel_gt *gt, const struct i915_wa *wa, u32 cur,
1884 const char *name, const char *from)
1885 {
1886 if ((cur ^ wa->set) & wa->read) {
1887 drm_err(&gt->i915->drm,
1888 "%s workaround lost on %s! (reg[%x]=0x%x, relevant bits were 0x%x vs expected 0x%x)\n",
1889 name, from, i915_mmio_reg_offset(wa->reg),
1890 cur, cur & wa->read, wa->set & wa->read);
1891
1892 return false;
1893 }
1894
1895 return true;
1896 }
1897
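/*
 * Program every workaround in the list. The read-modify-write is open-coded
 * (under forcewake, the uncore lock and the MCR lock) so that multicast
 * registers can be steered correctly; on CONFIG_DRM_I915_DEBUG_GEM builds
 * each value is read back and verified immediately after being written.
 */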
1898 static void wa_list_apply(const struct i915_wa_list *wal)
1899 {
1900 struct intel_gt *gt = wal->gt;
1901 struct intel_uncore *uncore = gt->uncore;
1902 enum forcewake_domains fw;
1903 unsigned long flags;
1904 struct i915_wa *wa;
1905 unsigned int i;
1906
1907 if (!wal->count)
1908 return;
1909
1910 fw = wal_get_fw_for_rmw(uncore, wal);
1911
1912 intel_gt_mcr_lock(gt, &flags);
1913 spin_lock(&uncore->lock);
1914 intel_uncore_forcewake_get__locked(uncore, fw);
1915
1916 for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
1917 u32 val, old = 0;
1918
1919 /* open-coded rmw due to steering */
1920 if (wa->clr)
1921 old = wa->is_mcr ?
1922 intel_gt_mcr_read_any_fw(gt, wa->mcr_reg) :
1923 intel_uncore_read_fw(uncore, wa->reg);
1924 val = (old & ~wa->clr) | wa->set;
1925 if (val != old || !wa->clr) {
1926 if (wa->is_mcr)
1927 intel_gt_mcr_multicast_write_fw(gt, wa->mcr_reg, val);
1928 else
1929 intel_uncore_write_fw(uncore, wa->reg, val);
1930 }
1931
1932 if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) {
1933 u32 val = wa->is_mcr ?
1934 intel_gt_mcr_read_any_fw(gt, wa->mcr_reg) :
1935 intel_uncore_read_fw(uncore, wa->reg);
1936
1937 wa_verify(gt, wa, val, wal->name, "application");
1938 }
1939 }
1940
1941 intel_uncore_forcewake_put__locked(uncore, fw);
1942 spin_unlock(&uncore->lock);
1943 intel_gt_mcr_unlock(gt, flags);
1944 }
1945
1946 void intel_gt_apply_workarounds(struct intel_gt *gt)
1947 {
1948 wa_list_apply(&gt->wa_list);
1949 }
1950
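/*
 * Read back every register on the list (with forcewake held) and check that
 * the expected workaround bits are still set.
 */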
1951 static bool wa_list_verify(struct intel_gt *gt,
1952 const struct i915_wa_list *wal,
1953 const char *from)
1954 {
1955 struct intel_uncore *uncore = gt->uncore;
1956 struct i915_wa *wa;
1957 enum forcewake_domains fw;
1958 unsigned long flags;
1959 unsigned int i;
1960 bool ok = true;
1961
1962 fw = wal_get_fw_for_rmw(uncore, wal);
1963
1964 intel_gt_mcr_lock(gt, &flags);
1965 spin_lock(&uncore->lock);
1966 intel_uncore_forcewake_get__locked(uncore, fw);
1967
1968 for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
1969 ok &= wa_verify(wal->gt, wa, wa->is_mcr ?
1970 intel_gt_mcr_read_any_fw(gt, wa->mcr_reg) :
1971 intel_uncore_read_fw(uncore, wa->reg),
1972 wal->name, from);
1973
1974 intel_uncore_forcewake_put__locked(uncore, fw);
1975 spin_unlock(&uncore->lock);
1976 intel_gt_mcr_unlock(gt, flags);
1977
1978 return ok;
1979 }
1980
1981 bool intel_gt_verify_workarounds(struct intel_gt *gt, const char *from)
1982 {
1983 return wa_list_verify(gt, &gt->wa_list, from);
1984 }
1985
1986 __maybe_unused
1987 static bool is_nonpriv_flags_valid(u32 flags)
1988 {
1989 /* Check only valid flag bits are set */
1990 if (flags & ~RING_FORCE_TO_NONPRIV_MASK_VALID)
1991 return false;
1992
1993 /* NB: Only 3 out of 4 enum values are valid for access field */
1994 if ((flags & RING_FORCE_TO_NONPRIV_ACCESS_MASK) ==
1995 RING_FORCE_TO_NONPRIV_ACCESS_INVALID)
1996 return false;
1997
1998 return true;
1999 }
2000
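/*
 * Add a register to the engine's FORCE_TO_NONPRIV whitelist. The access
 * flags are folded into the low bits of the register offset, which is the
 * value later written into a RING_FORCE_TO_NONPRIV slot.
 */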
2001 static void
2002 whitelist_reg_ext(struct i915_wa_list *wal, i915_reg_t reg, u32 flags)
2003 {
2004 struct i915_wa wa = {
2005 .reg = reg
2006 };
2007
2008 if (GEM_DEBUG_WARN_ON(wal->count >= RING_MAX_NONPRIV_SLOTS))
2009 return;
2010
2011 if (GEM_DEBUG_WARN_ON(!is_nonpriv_flags_valid(flags)))
2012 return;
2013
2014 wa.reg.reg |= flags;
2015 _wa_add(wal, &wa);
2016 }
2017
2018 static void
2019 whitelist_mcr_reg_ext(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 flags)
2020 {
2021 struct i915_wa wa = {
2022 .mcr_reg = reg,
2023 .is_mcr = 1,
2024 };
2025
2026 if (GEM_DEBUG_WARN_ON(wal->count >= RING_MAX_NONPRIV_SLOTS))
2027 return;
2028
2029 if (GEM_DEBUG_WARN_ON(!is_nonpriv_flags_valid(flags)))
2030 return;
2031
2032 wa.mcr_reg.reg |= flags;
2033 _wa_add(wal, &wa);
2034 }
2035
2036 static void
2037 whitelist_reg(struct i915_wa_list *wal, i915_reg_t reg)
2038 {
2039 whitelist_reg_ext(wal, reg, RING_FORCE_TO_NONPRIV_ACCESS_RW);
2040 }
2041
2042 static void
2043 whitelist_mcr_reg(struct i915_wa_list *wal, i915_mcr_reg_t reg)
2044 {
2045 whitelist_mcr_reg_ext(wal, reg, RING_FORCE_TO_NONPRIV_ACCESS_RW);
2046 }
2047
2048 static void gen9_whitelist_build(struct i915_wa_list *w)
2049 {
2050 /* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk,cfl */
2051 whitelist_reg(w, GEN9_CTX_PREEMPT_REG);
2052
2053 /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,[cnl] */
2054 whitelist_reg(w, GEN8_CS_CHICKEN1);
2055
2056 /* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl,glk,cfl */
2057 whitelist_reg(w, GEN8_HDC_CHICKEN1);
2058
2059 /* WaSendPushConstantsFromMMIO:skl,bxt */
2060 whitelist_reg(w, COMMON_SLICE_CHICKEN2);
2061 }
2062
2063 static void skl_whitelist_build(struct intel_engine_cs *engine)
2064 {
2065 struct i915_wa_list *w = &engine->whitelist;
2066
2067 if (engine->class != RENDER_CLASS)
2068 return;
2069
2070 gen9_whitelist_build(w);
2071
2072 /* WaDisableLSQCROPERFforOCL:skl */
2073 whitelist_mcr_reg(w, GEN8_L3SQCREG4);
2074 }
2075
2076 static void bxt_whitelist_build(struct intel_engine_cs *engine)
2077 {
2078 if (engine->class != RENDER_CLASS)
2079 return;
2080
2081 gen9_whitelist_build(&engine->whitelist);
2082 }
2083
2084 static void kbl_whitelist_build(struct intel_engine_cs *engine)
2085 {
2086 struct i915_wa_list *w = &engine->whitelist;
2087
2088 if (engine->class != RENDER_CLASS)
2089 return;
2090
2091 gen9_whitelist_build(w);
2092
2093 /* WaDisableLSQCROPERFforOCL:kbl */
2094 whitelist_mcr_reg(w, GEN8_L3SQCREG4);
2095 }
2096
2097 static void glk_whitelist_build(struct intel_engine_cs *engine)
2098 {
2099 struct i915_wa_list *w = &engine->whitelist;
2100
2101 if (engine->class != RENDER_CLASS)
2102 return;
2103
2104 gen9_whitelist_build(w);
2105
2106 /* WA #0862: Userspace has to set "Barrier Mode" to avoid hangs. */
2107 whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1);
2108 }
2109
2110 static void cfl_whitelist_build(struct intel_engine_cs *engine)
2111 {
2112 struct i915_wa_list *w = &engine->whitelist;
2113
2114 if (engine->class != RENDER_CLASS)
2115 return;
2116
2117 gen9_whitelist_build(w);
2118
2119 /*
2120 * WaAllowPMDepthAndInvocationCountAccessFromUMD:cfl,whl,cml,aml
2121 *
2122 * This covers 4 registers which are next to one another:
2123 * - PS_INVOCATION_COUNT
2124 * - PS_INVOCATION_COUNT_UDW
2125 * - PS_DEPTH_COUNT
2126 * - PS_DEPTH_COUNT_UDW
2127 */
2128 whitelist_reg_ext(w, PS_INVOCATION_COUNT,
2129 RING_FORCE_TO_NONPRIV_ACCESS_RD |
2130 RING_FORCE_TO_NONPRIV_RANGE_4);
2131 }
2132
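/* Allow userspace to read RING_CTX_TIMESTAMP on the non-render engines. */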
2133 static void allow_read_ctx_timestamp(struct intel_engine_cs *engine)
2134 {
2135 struct i915_wa_list *w = &engine->whitelist;
2136
2137 if (engine->class != RENDER_CLASS)
2138 whitelist_reg_ext(w,
2139 RING_CTX_TIMESTAMP(engine->mmio_base),
2140 RING_FORCE_TO_NONPRIV_ACCESS_RD);
2141 }
2142
2143 static void cml_whitelist_build(struct intel_engine_cs *engine)
2144 {
2145 allow_read_ctx_timestamp(engine);
2146
2147 cfl_whitelist_build(engine);
2148 }
2149
2150 static void icl_whitelist_build(struct intel_engine_cs *engine)
2151 {
2152 struct i915_wa_list *w = &engine->whitelist;
2153
2154 allow_read_ctx_timestamp(engine);
2155
2156 switch (engine->class) {
2157 case RENDER_CLASS:
2158 /* WaAllowUMDToModifyHalfSliceChicken7:icl */
2159 whitelist_mcr_reg(w, GEN9_HALF_SLICE_CHICKEN7);
2160
2161 /* WaAllowUMDToModifySamplerMode:icl */
2162 whitelist_mcr_reg(w, GEN10_SAMPLER_MODE);
2163
2164 /* WaEnableStateCacheRedirectToCS:icl */
2165 whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1);
2166
2167 /*
2168 * WaAllowPMDepthAndInvocationCountAccessFromUMD:icl
2169 *
2170 * This covers 4 registers which are next to one another:
2171 * - PS_INVOCATION_COUNT
2172 * - PS_INVOCATION_COUNT_UDW
2173 * - PS_DEPTH_COUNT
2174 * - PS_DEPTH_COUNT_UDW
2175 */
2176 whitelist_reg_ext(w, PS_INVOCATION_COUNT,
2177 RING_FORCE_TO_NONPRIV_ACCESS_RD |
2178 RING_FORCE_TO_NONPRIV_RANGE_4);
2179 break;
2180
2181 case VIDEO_DECODE_CLASS:
2182 /* hucStatusRegOffset */
2183 whitelist_reg_ext(w, _MMIO(0x2000 + engine->mmio_base),
2184 RING_FORCE_TO_NONPRIV_ACCESS_RD);
2185 /* hucUKernelHdrInfoRegOffset */
2186 whitelist_reg_ext(w, _MMIO(0x2014 + engine->mmio_base),
2187 RING_FORCE_TO_NONPRIV_ACCESS_RD);
2188 /* hucStatus2RegOffset */
2189 whitelist_reg_ext(w, _MMIO(0x23B0 + engine->mmio_base),
2190 RING_FORCE_TO_NONPRIV_ACCESS_RD);
2191 break;
2192
2193 default:
2194 break;
2195 }
2196 }
2197
2198 static void tgl_whitelist_build(struct intel_engine_cs *engine)
2199 {
2200 struct i915_wa_list *w = &engine->whitelist;
2201
2202 allow_read_ctx_timestamp(engine);
2203
2204 switch (engine->class) {
2205 case RENDER_CLASS:
2206 /*
2207 * WaAllowPMDepthAndInvocationCountAccessFromUMD:tgl
2208 * Wa_1408556865:tgl
2209 *
2210 * This covers 4 registers which are next to one another:
2211 * - PS_INVOCATION_COUNT
2212 * - PS_INVOCATION_COUNT_UDW
2213 * - PS_DEPTH_COUNT
2214 * - PS_DEPTH_COUNT_UDW
2215 */
2216 whitelist_reg_ext(w, PS_INVOCATION_COUNT,
2217 RING_FORCE_TO_NONPRIV_ACCESS_RD |
2218 RING_FORCE_TO_NONPRIV_RANGE_4);
2219
2220 /*
2221 * Wa_1808121037:tgl
2222 * Wa_14012131227:dg1
2223 * Wa_1508744258:tgl,rkl,dg1,adl-s,adl-p
2224 */
2225 whitelist_reg(w, GEN7_COMMON_SLICE_CHICKEN1);
2226
2227 /* Wa_1806527549:tgl */
2228 whitelist_reg(w, HIZ_CHICKEN);
2229
2230 /* Required by recommended tuning setting (not a workaround) */
2231 whitelist_reg(w, GEN11_COMMON_SLICE_CHICKEN3);
2232
2233 break;
2234 default:
2235 break;
2236 }
2237 }
2238
2239 static void dg2_whitelist_build(struct intel_engine_cs *engine)
2240 {
2241 struct i915_wa_list *w = &engine->whitelist;
2242
2243 switch (engine->class) {
2244 case RENDER_CLASS:
2245 /*
2246 * Wa_1507100340:dg2_g10
2247 *
2248 * This covers 4 registers which are next to one another:
2249 * - PS_INVOCATION_COUNT
2250 * - PS_INVOCATION_COUNT_UDW
2251 * - PS_DEPTH_COUNT
2252 * - PS_DEPTH_COUNT_UDW
2253 */
2254 if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_A0, STEP_B0))
2255 whitelist_reg_ext(w, PS_INVOCATION_COUNT,
2256 RING_FORCE_TO_NONPRIV_ACCESS_RD |
2257 RING_FORCE_TO_NONPRIV_RANGE_4);
2258
2259 /* Required by recommended tuning setting (not a workaround) */
2260 whitelist_mcr_reg(w, XEHP_COMMON_SLICE_CHICKEN3);
2261
2262 break;
2263 case COMPUTE_CLASS:
2264 /* Wa_16011157294:dg2_g10 */
2265 if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_A0, STEP_B0))
2266 whitelist_reg(w, GEN9_CTX_PREEMPT_REG);
2267 break;
2268 default:
2269 break;
2270 }
2271 }
2272
2273 static void blacklist_trtt(struct intel_engine_cs *engine)
2274 {
2275 struct i915_wa_list *w = &engine->whitelist;
2276
2277 /*
2278 * Prevent read/write access to [0x4400, 0x4600) which covers
2279 * the TRTT range across all engines. Note that normally userspace
2280 * cannot access the other engines' trtt control, but for simplicity
2281 * we cover the entire range on each engine.
2282 */
2283 whitelist_reg_ext(w, _MMIO(0x4400),
2284 RING_FORCE_TO_NONPRIV_DENY |
2285 RING_FORCE_TO_NONPRIV_RANGE_64);
2286 whitelist_reg_ext(w, _MMIO(0x4500),
2287 RING_FORCE_TO_NONPRIV_DENY |
2288 RING_FORCE_TO_NONPRIV_RANGE_64);
2289 }
2290
2291 static void pvc_whitelist_build(struct intel_engine_cs *engine)
2292 {
2293 /* Wa_16014440446:pvc */
2294 blacklist_trtt(engine);
2295 }
2296
2297 static void mtl_whitelist_build(struct intel_engine_cs *engine)
2298 {
2299 struct i915_wa_list *w = &engine->whitelist;
2300
2301 switch (engine->class) {
2302 case RENDER_CLASS:
2303 /* Required by recommended tuning setting (not a workaround) */
2304 whitelist_mcr_reg(w, XEHP_COMMON_SLICE_CHICKEN3);
2305
2306 break;
2307 default:
2308 break;
2309 }
2310 }
2311
2312 void intel_engine_init_whitelist(struct intel_engine_cs *engine)
2313 {
2314 struct drm_i915_private *i915 = engine->i915;
2315 struct i915_wa_list *w = &engine->whitelist;
2316
2317 wa_init_start(w, engine->gt, "whitelist", engine->name);
2318
2319 if (IS_METEORLAKE(i915))
2320 mtl_whitelist_build(engine);
2321 else if (IS_PONTEVECCHIO(i915))
2322 pvc_whitelist_build(engine);
2323 else if (IS_DG2(i915))
2324 dg2_whitelist_build(engine);
2325 else if (IS_XEHPSDV(i915))
2326 ; /* none needed */
2327 else if (GRAPHICS_VER(i915) == 12)
2328 tgl_whitelist_build(engine);
2329 else if (GRAPHICS_VER(i915) == 11)
2330 icl_whitelist_build(engine);
2331 else if (IS_COMETLAKE(i915))
2332 cml_whitelist_build(engine);
2333 else if (IS_COFFEELAKE(i915))
2334 cfl_whitelist_build(engine);
2335 else if (IS_GEMINILAKE(i915))
2336 glk_whitelist_build(engine);
2337 else if (IS_KABYLAKE(i915))
2338 kbl_whitelist_build(engine);
2339 else if (IS_BROXTON(i915))
2340 bxt_whitelist_build(engine);
2341 else if (IS_SKYLAKE(i915))
2342 skl_whitelist_build(engine);
2343 else if (GRAPHICS_VER(i915) <= 8)
2344 ;
2345 else
2346 MISSING_CASE(GRAPHICS_VER(i915));
2347
2348 wa_init_finish(w);
2349 }
2350
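/*
 * Program the whitelisted register offsets into the engine's
 * RING_FORCE_TO_NONPRIV slots and point the unused slots at the harmless
 * RING_NOPID register.
 */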
2351 void intel_engine_apply_whitelist(struct intel_engine_cs *engine)
2352 {
2353 const struct i915_wa_list *wal = &engine->whitelist;
2354 struct intel_uncore *uncore = engine->uncore;
2355 const u32 base = engine->mmio_base;
2356 struct i915_wa *wa;
2357 unsigned int i;
2358
2359 if (!wal->count)
2360 return;
2361
2362 for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
2363 intel_uncore_write(uncore,
2364 RING_FORCE_TO_NONPRIV(base, i),
2365 i915_mmio_reg_offset(wa->reg));
2366
2367 /* And clear the rest just in case of garbage */
2368 for (; i < RING_MAX_NONPRIV_SLOTS; i++)
2369 intel_uncore_write(uncore,
2370 RING_FORCE_TO_NONPRIV(base, i),
2371 i915_mmio_reg_offset(RING_NOPID(base)));
2372 }
2373
2374 /*
2375 * engine_fake_wa_init() is a placeholder used to program registers
2376 * that are not part of an official workaround defined by the
2377 * hardware team.
2378 * Handling their programming through the workaround framework allows
2379 * proper application and verification.
2380 */
2381 static void
2382 engine_fake_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
2383 {
2384 u8 mocs_w, mocs_r;
2385
2386 /*
2387 * RING_CMD_CCTL specifies the default MOCS entry that will be used
2388 * by the command streamer when executing commands that don't have
2389 * a way to explicitly specify a MOCS setting. The default should
2390 * usually reference whichever MOCS entry corresponds to uncached
2391 * behavior, although use of a WB cached entry is recommended by the
2392 * spec in certain circumstances on specific platforms.
2393 */
2394 if (GRAPHICS_VER(engine->i915) >= 12) {
2395 mocs_r = engine->gt->mocs.uc_index;
2396 mocs_w = engine->gt->mocs.uc_index;
2397
2398 if (HAS_L3_CCS_READ(engine->i915) &&
2399 engine->class == COMPUTE_CLASS) {
2400 mocs_r = engine->gt->mocs.wb_index;
2401
2402 /*
2403 * Even on the few platforms where MOCS 0 is a
2404 * legitimate table entry, it's never the correct
2405 * setting to use here; we can assume the MOCS init
2406 * just forgot to initialize wb_index.
2407 */
2408 drm_WARN_ON(&engine->i915->drm, mocs_r == 0);
2409 }
2410
2411 wa_masked_field_set(wal,
2412 RING_CMD_CCTL(engine->mmio_base),
2413 CMD_CCTL_MOCS_MASK,
2414 CMD_CCTL_MOCS_OVERRIDE(mocs_w, mocs_r));
2415 }
2416 }
2417
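/*
 * Returns true when the first gslice is fused off, i.e. when the lowest
 * enabled DSS index falls outside gslice 0.
 */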
2418 static bool needs_wa_1308578152(struct intel_engine_cs *engine)
2419 {
2420 return intel_sseu_find_first_xehp_dss(&engine->gt->info.sseu, 0, 0) >=
2421 GEN_DSS_PER_GSLICE;
2422 }
2423
2424 static void
2425 rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
2426 {
2427 struct drm_i915_private *i915 = engine->i915;
2428
2429 if (IS_MTL_GRAPHICS_STEP(i915, M, STEP_A0, STEP_B0) ||
2430 IS_MTL_GRAPHICS_STEP(i915, P, STEP_A0, STEP_B0)) {
2431 /* Wa_22014600077 */
2432 wa_mcr_masked_en(wal, GEN10_CACHE_MODE_SS,
2433 ENABLE_EU_COUNT_FOR_TDL_FLUSH);
2434 }
2435
2436 if (IS_MTL_GRAPHICS_STEP(i915, M, STEP_A0, STEP_B0) ||
2437 IS_MTL_GRAPHICS_STEP(i915, P, STEP_A0, STEP_B0) ||
2438 IS_DG2_GRAPHICS_STEP(i915, G10, STEP_B0, STEP_FOREVER) ||
2439 IS_DG2_G11(i915) || IS_DG2_G12(i915)) {
2440 /* Wa_1509727124 */
2441 wa_mcr_masked_en(wal, GEN10_SAMPLER_MODE,
2442 SC_DISABLE_POWER_OPTIMIZATION_EBB);
2443 }
2444
2445 if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_B0, STEP_FOREVER) ||
2446 IS_DG2_G11(i915) || IS_DG2_G12(i915) ||
2447 IS_MTL_GRAPHICS_STEP(i915, M, STEP_A0, STEP_B0)) {
2448 /* Wa_22012856258 */
2449 wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN2,
2450 GEN12_DISABLE_READ_SUPPRESSION);
2451 }
2452
2453 if (IS_DG2_GRAPHICS_STEP(i915, G11, STEP_A0, STEP_B0)) {
2454 /* Wa_14013392000:dg2_g11 */
2455 wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN2, GEN12_ENABLE_LARGE_GRF_MODE);
2456 }
2457
2458 if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_A0, STEP_B0) ||
2459 IS_DG2_GRAPHICS_STEP(i915, G11, STEP_A0, STEP_B0)) {
2460 /* Wa_14012419201:dg2 */
2461 wa_mcr_masked_en(wal, GEN9_ROW_CHICKEN4,
2462 GEN12_DISABLE_HDR_PAST_PAYLOAD_HOLD_FIX);
2463 }
2464
2465 /* Wa_1308578152:dg2_g10 when first gslice is fused off */
2466 if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_B0, STEP_C0) &&
2467 needs_wa_1308578152(engine)) {
2468 wa_masked_dis(wal, GEN12_CS_DEBUG_MODE1_CCCSUNIT_BE_COMMON,
2469 GEN12_REPLAY_MODE_GRANULARITY);
2470 }
2471
2472 if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_B0, STEP_FOREVER) ||
2473 IS_DG2_G11(i915) || IS_DG2_G12(i915)) {
2474 /*
2475 * Wa_22010960976:dg2
2476 * Wa_14013347512:dg2
2477 */
2478 wa_mcr_masked_dis(wal, XEHP_HDC_CHICKEN0,
2479 LSC_L1_FLUSH_CTL_3D_DATAPORT_FLUSH_EVENTS_MASK);
2480 }
2481
2482 if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_A0, STEP_B0)) {
2483 /*
2484 * Wa_1608949956:dg2_g10
2485 * Wa_14010198302:dg2_g10
2486 */
2487 wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN,
2488 MDQ_ARBITRATION_MODE | UGM_BACKUP_MODE);
2489 }
2490
2491 if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_A0, STEP_B0))
2492 /* Wa_22010430635:dg2 */
2493 wa_mcr_masked_en(wal,
2494 GEN9_ROW_CHICKEN4,
2495 GEN12_DISABLE_GRF_CLEAR);
2496
2497 /* Wa_14013202645:dg2 */
2498 if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_B0, STEP_C0) ||
2499 IS_DG2_GRAPHICS_STEP(i915, G11, STEP_A0, STEP_B0))
2500 wa_mcr_write_or(wal, RT_CTRL, DIS_NULL_QUERY);
2501
2502 /* Wa_22012532006:dg2 */
2503 if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_A0, STEP_C0) ||
2504 IS_DG2_GRAPHICS_STEP(engine->i915, G11, STEP_A0, STEP_B0))
2505 wa_mcr_masked_en(wal, GEN9_HALF_SLICE_CHICKEN7,
2506 DG2_DISABLE_ROUND_ENABLE_ALLOW_FOR_SSLA);
2507
2508 if (IS_DG2_GRAPHICS_STEP(i915, G11, STEP_B0, STEP_FOREVER) ||
2509 IS_DG2_G10(i915)) {
2510 /* Wa_22014600077:dg2 */
2511 wa_mcr_add(wal, GEN10_CACHE_MODE_SS, 0,
2512 _MASKED_BIT_ENABLE(ENABLE_EU_COUNT_FOR_TDL_FLUSH),
2513 0 /* Wa_14012342262 write-only reg, so skip verification */,
2514 true);
2515 }
2516
2517 if (IS_ALDERLAKE_P(i915) || IS_ALDERLAKE_S(i915) || IS_DG1(i915) ||
2518 IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
2519 /* Wa_1606931601:tgl,rkl,dg1,adl-s,adl-p */
2520 wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN2, GEN12_DISABLE_EARLY_READ);
2521
2522 /*
2523 * Wa_1407928979:tgl A*
2524 * Wa_18011464164:tgl[B0+],dg1[B0+]
2525 * Wa_22010931296:tgl[B0+],dg1[B0+]
2526 * Wa_14010919138:rkl,dg1,adl-s,adl-p
2527 */
2528 wa_write_or(wal, GEN7_FF_THREAD_MODE,
2529 GEN12_FF_TESSELATION_DOP_GATE_DISABLE);
2530 }
2531
2532 if (IS_ALDERLAKE_P(i915) || IS_DG2(i915) || IS_ALDERLAKE_S(i915) ||
2533 IS_DG1(i915) || IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
2534 /*
2535 * Wa_1606700617:tgl,dg1,adl-p
2536 * Wa_22010271021:tgl,rkl,dg1,adl-s,adl-p
2537 * Wa_14010826681:tgl,dg1,rkl,adl-p
2538 * Wa_18019627453:dg2
2539 */
2540 wa_masked_en(wal,
2541 GEN9_CS_DEBUG_MODE1,
2542 FF_DOP_CLOCK_GATE_DISABLE);
2543 }
2544
2545 if (IS_ALDERLAKE_P(i915) || IS_ALDERLAKE_S(i915) ||
2546 IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
2547 /* Wa_1409804808 */
2548 wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN2,
2549 GEN12_PUSH_CONST_DEREF_HOLD_DIS);
2550
2551 /* Wa_14010229206 */
2552 wa_mcr_masked_en(wal, GEN9_ROW_CHICKEN4, GEN12_DISABLE_TDL_PUSH);
2553 }
2554
2555 if (IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915) || IS_ALDERLAKE_P(i915)) {
2556 /*
2557 * Wa_1607297627
2558 *
2559 * On TGL and RKL there are multiple entries for this WA in the
2560 * BSpec; some indicate this is an A0-only WA, others indicate
2561 * it applies to all steppings so we trust the "all steppings."
2562 */
2563 wa_masked_en(wal,
2564 RING_PSMI_CTL(RENDER_RING_BASE),
2565 GEN12_WAIT_FOR_EVENT_POWER_DOWN_DISABLE |
2566 GEN8_RC_SEMA_IDLE_MSG_DISABLE);
2567 }
2568
2569 if (IS_DG1(i915) || IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915) ||
2570 IS_ALDERLAKE_S(i915) || IS_ALDERLAKE_P(i915)) {
2571 /* Wa_1406941453:tgl,rkl,dg1,adl-s,adl-p */
2572 wa_mcr_masked_en(wal,
2573 GEN10_SAMPLER_MODE,
2574 ENABLE_SMALLPL);
2575 }
2576
2577 if (GRAPHICS_VER(i915) == 11) {
2578 /* This is not a Wa. Enable for better image quality */
2579 wa_masked_en(wal,
2580 _3D_CHICKEN3,
2581 _3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE);
2582
2583 /*
2584 * Wa_1405543622:icl
2585 * Formerly known as WaGAPZPriorityScheme
2586 */
2587 wa_write_or(wal,
2588 GEN8_GARBCNTL,
2589 GEN11_ARBITRATION_PRIO_ORDER_MASK);
2590
2591 /*
2592 * Wa_1604223664:icl
2593 * Formerly known as WaL3BankAddressHashing
2594 */
2595 wa_write_clr_set(wal,
2596 GEN8_GARBCNTL,
2597 GEN11_HASH_CTRL_EXCL_MASK,
2598 GEN11_HASH_CTRL_EXCL_BIT0);
2599 wa_write_clr_set(wal,
2600 GEN11_GLBLINVL,
2601 GEN11_BANK_HASH_ADDR_EXCL_MASK,
2602 GEN11_BANK_HASH_ADDR_EXCL_BIT0);
2603
2604 /*
2605 * Wa_1405733216:icl
2606 * Formerly known as WaDisableCleanEvicts
2607 */
2608 wa_mcr_write_or(wal,
2609 GEN8_L3SQCREG4,
2610 GEN11_LQSC_CLEAN_EVICT_DISABLE);
2611
2612 /* Wa_1606682166:icl */
2613 wa_write_or(wal,
2614 GEN7_SARCHKMD,
2615 GEN7_DISABLE_SAMPLER_PREFETCH);
2616
2617 /* Wa_1409178092:icl */
2618 wa_mcr_write_clr_set(wal,
2619 GEN11_SCRATCH2,
2620 GEN11_COHERENT_PARTIAL_WRITE_MERGE_ENABLE,
2621 0);
2622
2623 /* WaEnable32PlaneMode:icl */
2624 wa_masked_en(wal, GEN9_CSFE_CHICKEN1_RCS,
2625 GEN11_ENABLE_32_PLANE_MODE);
2626
2627 /*
2628 * Wa_1408767742:icl[a2..forever],ehl[all]
2629 * Wa_1605460711:icl[a0..c0]
2630 */
2631 wa_write_or(wal,
2632 GEN7_FF_THREAD_MODE,
2633 GEN12_FF_TESSELATION_DOP_GATE_DISABLE);
2634
2635 /* Wa_22010271021 */
2636 wa_masked_en(wal,
2637 GEN9_CS_DEBUG_MODE1,
2638 FF_DOP_CLOCK_GATE_DISABLE);
2639 }
2640
2641 /*
2642 * Intel platforms that support fine-grained preemption (i.e., gen9 and
2643 * beyond) allow the kernel-mode driver to choose between two different
2644 * options for controlling preemption granularity and behavior.
2645 *
2646 * Option 1 (hardware default):
2647 * Preemption settings are controlled in a global manner via
2648 * kernel-only register CS_DEBUG_MODE1 (0x20EC). Any granularity
2649 * and settings chosen by the kernel-mode driver will apply to all
2650 * userspace clients.
2651 *
2652 * Option 2:
2653 * Preemption settings are controlled on a per-context basis via
2654 * register CS_CHICKEN1 (0x2580). CS_CHICKEN1 is saved/restored on
2655 * context switch and is writable by userspace (e.g., via
2656 * MI_LOAD_REGISTER_IMMEDIATE instructions placed in a batch buffer)
2657 * which allows different userspace drivers/clients to select
2658 * different settings, or to change those settings on the fly in
2659 * response to runtime needs. This option was known by name
2660 * "FtrPerCtxtPreemptionGranularityControl" at one time, although
2661 * that name is somewhat misleading as other non-granularity
2662 * preemption settings are also impacted by this decision.
2663 *
2664 * On Linux, our policy has always been to let userspace drivers
2665 * control preemption granularity/settings (Option 2). This was
2666 * originally mandatory on gen9 to prevent ABI breakage (old gen9
2667 * userspace developed before object-level preemption was enabled would
2668 * not behave well if i915 were to go with Option 1 and enable that
2669 * preemption in a global manner). On gen9 each context would have
2670 * object-level preemption disabled by default (see
2671 * WaDisable3DMidCmdPreemption in gen9_ctx_workarounds_init), but
2672 * userspace drivers could opt-in to object-level preemption as they
2673 * saw fit. For post-gen9 platforms, we continue to utilize Option 2;
2674 * even though it is no longer necessary for ABI compatibility when
2675 * enabling a new platform, it does ensure that userspace will be able
2676 * to implement any workarounds that show up requiring temporary
2677 * adjustments to preemption behavior at runtime.
2678 *
2679 * Notes/Workarounds:
2680 * - Wa_14015141709: On DG2 and early steppings of MTL,
2681 * CS_CHICKEN1[0] does not disable object-level preemption as
2682 * it is supposed to (nor does CS_DEBUG_MODE1[0] if we had been
2683 * using Option 1). Effectively this means userspace is unable
2684 * to disable object-level preemption on these platforms/steppings
2685 * despite the setting here.
2686 *
2687 * - Wa_16013994831: May require that userspace program
2688 * CS_CHICKEN1[10] when certain runtime conditions are true.
2689 * Userspace requires Option 2 to be in effect for their update of
2690 * CS_CHICKEN1[10] to be effective.
2691 *
2692 * Other workarounds may appear in the future that will also require
2693 * Option 2 behavior to allow proper userspace implementation.
2694 */
2695 if (GRAPHICS_VER(i915) >= 9)
2696 wa_masked_en(wal,
2697 GEN7_FF_SLICE_CS_CHICKEN1,
2698 GEN9_FFSC_PERCTX_PREEMPT_CTRL);
2699
2700 if (IS_SKYLAKE(i915) ||
2701 IS_KABYLAKE(i915) ||
2702 IS_COFFEELAKE(i915) ||
2703 IS_COMETLAKE(i915)) {
2704 /* WaEnableGapsTsvCreditFix:skl,kbl,cfl */
2705 wa_write_or(wal,
2706 GEN8_GARBCNTL,
2707 GEN9_GAPS_TSV_CREDIT_DISABLE);
2708 }
2709
2710 if (IS_BROXTON(i915)) {
2711 /* WaDisablePooledEuLoadBalancingFix:bxt */
2712 wa_masked_en(wal,
2713 FF_SLICE_CS_CHICKEN2,
2714 GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE);
2715 }
2716
2717 if (GRAPHICS_VER(i915) == 9) {
2718 /* WaContextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk,cfl */
2719 wa_masked_en(wal,
2720 GEN9_CSFE_CHICKEN1_RCS,
2721 GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE);
2722
2723 /* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl,glk,cfl */
2724 wa_mcr_write_or(wal,
2725 BDW_SCRATCH1,
2726 GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
2727
2728 /* WaProgramL3SqcReg1DefaultForPerf:bxt,glk */
2729 if (IS_GEN9_LP(i915))
2730 wa_mcr_write_clr_set(wal,
2731 GEN8_L3SQCREG1,
2732 L3_PRIO_CREDITS_MASK,
2733 L3_GENERAL_PRIO_CREDITS(62) |
2734 L3_HIGH_PRIO_CREDITS(2));
2735
2736 /* WaOCLCoherentLineFlush:skl,bxt,kbl,cfl */
2737 wa_mcr_write_or(wal,
2738 GEN8_L3SQCREG4,
2739 GEN8_LQSC_FLUSH_COHERENT_LINES);
2740
2741 /* Disable atomics in L3 to prevent unrecoverable hangs */
2742 wa_write_clr_set(wal, GEN9_SCRATCH_LNCF1,
2743 GEN9_LNCF_NONIA_COHERENT_ATOMICS_ENABLE, 0);
2744 wa_mcr_write_clr_set(wal, GEN8_L3SQCREG4,
2745 GEN8_LQSQ_NONIA_COHERENT_ATOMICS_ENABLE, 0);
2746 wa_mcr_write_clr_set(wal, GEN9_SCRATCH1,
2747 EVICTION_PERF_FIX_ENABLE, 0);
2748 }
2749
2750 if (IS_HASWELL(i915)) {
2751 /* WaSampleCChickenBitEnable:hsw */
2752 wa_masked_en(wal,
2753 HSW_HALF_SLICE_CHICKEN3, HSW_SAMPLE_C_PERFORMANCE);
2754
2755 wa_masked_dis(wal,
2756 CACHE_MODE_0_GEN7,
2757 /* enable HiZ Raw Stall Optimization */
2758 HIZ_RAW_STALL_OPT_DISABLE);
2759 }
2760
2761 if (IS_VALLEYVIEW(i915)) {
2762 /* WaDisableEarlyCull:vlv */
2763 wa_masked_en(wal,
2764 _3D_CHICKEN3,
2765 _3D_CHICKEN_SF_DISABLE_OBJEND_CULL);
2766
2767 /*
2768 * WaVSThreadDispatchOverride:ivb,vlv
2769 *
2770 * This actually overrides the dispatch
2771 * mode for all thread types.
2772 */
2773 wa_write_clr_set(wal,
2774 GEN7_FF_THREAD_MODE,
2775 GEN7_FF_SCHED_MASK,
2776 GEN7_FF_TS_SCHED_HW |
2777 GEN7_FF_VS_SCHED_HW |
2778 GEN7_FF_DS_SCHED_HW);
2779
2780 /* WaPsdDispatchEnable:vlv */
2781 /* WaDisablePSDDualDispatchEnable:vlv */
2782 wa_masked_en(wal,
2783 GEN7_HALF_SLICE_CHICKEN1,
2784 GEN7_MAX_PS_THREAD_DEP |
2785 GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE);
2786 }
2787
2788 if (IS_IVYBRIDGE(i915)) {
2789 /* WaDisableEarlyCull:ivb */
2790 wa_masked_en(wal,
2791 _3D_CHICKEN3,
2792 _3D_CHICKEN_SF_DISABLE_OBJEND_CULL);
2793
2794 if (0) { /* causes HiZ corruption on ivb:gt1 */
2795 /* enable HiZ Raw Stall Optimization */
2796 wa_masked_dis(wal,
2797 CACHE_MODE_0_GEN7,
2798 HIZ_RAW_STALL_OPT_DISABLE);
2799 }
2800
2801 /*
2802 * WaVSThreadDispatchOverride:ivb,vlv
2803 *
2804 * This actually overrides the dispatch
2805 * mode for all thread types.
2806 */
2807 wa_write_clr_set(wal,
2808 GEN7_FF_THREAD_MODE,
2809 GEN7_FF_SCHED_MASK,
2810 GEN7_FF_TS_SCHED_HW |
2811 GEN7_FF_VS_SCHED_HW |
2812 GEN7_FF_DS_SCHED_HW);
2813
2814 /* WaDisablePSDDualDispatchEnable:ivb */
2815 if (IS_IVB_GT1(i915))
2816 wa_masked_en(wal,
2817 GEN7_HALF_SLICE_CHICKEN1,
2818 GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE);
2819 }
2820
2821 if (GRAPHICS_VER(i915) == 7) {
2822 /* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
2823 wa_masked_en(wal,
2824 RING_MODE_GEN7(RENDER_RING_BASE),
2825 GFX_TLB_INVALIDATE_EXPLICIT | GFX_REPLAY_MODE);
2826
2827 /* WaDisable_RenderCache_OperationalFlush:ivb,vlv,hsw */
2828 wa_masked_dis(wal, CACHE_MODE_0_GEN7, RC_OP_FLUSH_ENABLE);
2829
2830 /*
2831 * BSpec says this must be set, even though
2832 * WaDisable4x2SubspanOptimization:ivb,hsw
2833 * WaDisable4x2SubspanOptimization isn't listed for VLV.
2834 */
2835 wa_masked_en(wal,
2836 CACHE_MODE_1,
2837 PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);
2838
2839 /*
2840 * BSpec recommends 8x4 when MSAA is used,
2841 * however in practice 16x4 seems fastest.
2842 *
2843 * Note that PS/WM thread counts depend on the WIZ hashing
2844 * disable bit, which we don't touch here, but it's good
2845 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
2846 */
2847 wa_masked_field_set(wal,
2848 GEN7_GT_MODE,
2849 GEN6_WIZ_HASHING_MASK,
2850 GEN6_WIZ_HASHING_16x4);
2851 }
2852
2853 if (IS_GRAPHICS_VER(i915, 6, 7))
2854 /*
2855 * We need to disable the AsyncFlip performance optimisations in
2856 * order to use MI_WAIT_FOR_EVENT within the CS. It should
2857 * already be programmed to '1' on all products.
2858 *
2859 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
2860 */
2861 wa_masked_en(wal,
2862 RING_MI_MODE(RENDER_RING_BASE),
2863 ASYNC_FLIP_PERF_DISABLE);
2864
2865 if (GRAPHICS_VER(i915) == 6) {
2866 /*
2867 * Required for the hardware to program scanline values for
2868 * waiting
2869 * WaEnableFlushTlbInvalidationMode:snb
2870 */
2871 wa_masked_en(wal,
2872 GFX_MODE,
2873 GFX_TLB_INVALIDATE_EXPLICIT);
2874
2875 /* WaDisableHiZPlanesWhenMSAAEnabled:snb */
2876 wa_masked_en(wal,
2877 _3D_CHICKEN,
2878 _3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB);
2879
2880 wa_masked_en(wal,
2881 _3D_CHICKEN3,
2882 /* WaStripsFansDisableFastClipPerformanceFix:snb */
2883 _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL |
2884 /*
2885 * Bspec says:
2886 * "This bit must be set if 3DSTATE_CLIP clip mode is set
2887 * to normal and 3DSTATE_SF number of SF output attributes
2888 * is more than 16."
2889 */
2890 _3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH);
2891
2892 /*
2893 * BSpec recommends 8x4 when MSAA is used,
2894 * however in practice 16x4 seems fastest.
2895 *
2896 * Note that PS/WM thread counts depend on the WIZ hashing
2897 * disable bit, which we don't touch here, but it's good
2898 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
2899 */
2900 wa_masked_field_set(wal,
2901 GEN6_GT_MODE,
2902 GEN6_WIZ_HASHING_MASK,
2903 GEN6_WIZ_HASHING_16x4);
2904
2905 /* WaDisable_RenderCache_OperationalFlush:snb */
2906 wa_masked_dis(wal, CACHE_MODE_0, RC_OP_FLUSH_ENABLE);
2907
2908 /*
2909 * From the Sandybridge PRM, volume 1 part 3, page 24:
2910 * "If this bit is set, STCunit will have LRA as replacement
2911 * policy. [...] This bit must be reset. LRA replacement
2912 * policy is not supported."
2913 */
2914 wa_masked_dis(wal,
2915 CACHE_MODE_0,
2916 CM0_STC_EVICT_DISABLE_LRA_SNB);
2917 }
2918
2919 if (IS_GRAPHICS_VER(i915, 4, 6))
2920 /* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
2921 wa_add(wal, RING_MI_MODE(RENDER_RING_BASE),
2922 0, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH),
2923 /* XXX bit doesn't stick on Broadwater */
2924 IS_I965G(i915) ? 0 : VS_TIMER_DISPATCH, true);
2925
2926 if (GRAPHICS_VER(i915) == 4)
2927 /*
2928 * Disable CONSTANT_BUFFER before it is loaded from the context
2929 * image. As soon as it is loaded, it is executed and the stored
2930 * address may no longer be valid, leading to a GPU hang.
2931 *
2932 * This imposes the requirement that userspace reload their
2933 * CONSTANT_BUFFER on every batch, fortunately a requirement
2934 * they are already accustomed to from before contexts were
2935 * enabled.
2936 */
2937 wa_add(wal, ECOSKPD(RENDER_RING_BASE),
2938 0, _MASKED_BIT_ENABLE(ECO_CONSTANT_BUFFER_SR_DISABLE),
2939 0 /* XXX bit doesn't stick on Broadwater */,
2940 true);
2941 }
2942
2943 static void
2944 xcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
2945 {
2946 struct drm_i915_private *i915 = engine->i915;
2947
2948 /* WaKBLVECSSemaphoreWaitPoll:kbl */
2949 if (IS_KABYLAKE(i915) && IS_GRAPHICS_STEP(i915, STEP_A0, STEP_F0)) {
2950 wa_write(wal,
2951 RING_SEMA_WAIT_POLL(engine->mmio_base),
2952 1);
2953 }
2954 }
2955
2956 static void
2957 ccs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
2958 {
2959 if (IS_PVC_CT_STEP(engine->i915, STEP_A0, STEP_C0)) {
2960 /* Wa_14014999345:pvc */
2961 wa_mcr_masked_en(wal, GEN10_CACHE_MODE_SS, DISABLE_ECC);
2962 }
2963 }
2964
2965 /*
2966 * The bspec performance guide has recommended MMIO tuning settings. These
2967 * aren't truly "workarounds" but we want to program them with the same
2968 * workaround infrastructure to ensure that they're automatically added to
2969 * the GuC save/restore lists, re-applied at the right times, and checked for
2970 * any conflicting programming requested by real workarounds.
2971 *
2972 * Programming settings should be added here only if their registers are not
2973 * part of an engine's register state context. If a register is part of a
2974 * context, then any tuning settings should be programmed in an appropriate
2975 * function invoked by __intel_engine_init_ctx_wa().
2976 */
2977 static void
2978 add_render_compute_tuning_settings(struct drm_i915_private *i915,
2979 struct i915_wa_list *wal)
2980 {
2981 if (IS_METEORLAKE(i915) || IS_DG2(i915))
2982 wa_mcr_write_clr_set(wal, RT_CTRL, STACKID_CTRL, STACKID_CTRL_512);
2983
2984 /*
2985 * This tuning setting proves beneficial only on ATS-M designs; the
2986 * default "age based" setting is optimal on regular DG2 and other
2987 * platforms.
2988 */
2989 if (INTEL_INFO(i915)->tuning_thread_rr_after_dep)
2990 wa_mcr_masked_field_set(wal, GEN9_ROW_CHICKEN4, THREAD_EX_ARB_MODE,
2991 THREAD_EX_ARB_MODE_RR_AFTER_DEP);
2992
2993 if (GRAPHICS_VER(i915) == 12 && GRAPHICS_VER_FULL(i915) < IP_VER(12, 50))
2994 wa_write_clr(wal, GEN8_GARBCNTL, GEN12_BUS_HASH_CTL_BIT_EXC);
2995 }
2996
2997 /*
2998 * The workarounds in this function apply to shared registers in
2999 * the general render reset domain that aren't tied to a
3000 * specific engine. Since all render+compute engines get reset
3001 * together, and the contents of these registers are lost during
3002 * the shared render domain reset, we'll define such workarounds
3003 * here and then add them to just a single RCS or CCS engine's
3004 * workaround list (whichever engine has the I915_ENGINE_FIRST_RENDER_COMPUTE flag).
3005 */
3006 static void
3007 general_render_compute_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
3008 {
3009 struct drm_i915_private *i915 = engine->i915;
3010
3011 add_render_compute_tuning_settings(i915, wal);
3012
3013 if (GRAPHICS_VER(i915) >= 11) {
3014 /* This is not a Wa (although referred to as
3015 * WaSetInidrectStateOverride in places); it allows
3016 * applications that reference sampler states through
3017 * the BindlessSamplerStateBaseAddress to have their
3018 * border color relative to DynamicStateBaseAddress
3019 * rather than BindlessSamplerStateBaseAddress.
3020 *
3021 * Otherwise SAMPLER_STATE border colors have to be
3022 * copied in multiple heaps (DynamicStateBaseAddress &
3023 * BindlessSamplerStateBaseAddress)
3024 *
3025 * BSpec: 46052
3026 */
3027 wa_mcr_masked_en(wal,
3028 GEN10_SAMPLER_MODE,
3029 GEN11_INDIRECT_STATE_BASE_ADDR_OVERRIDE);
3030 }
3031
3032 if (IS_MTL_GRAPHICS_STEP(i915, M, STEP_B0, STEP_FOREVER) ||
3033 IS_MTL_GRAPHICS_STEP(i915, P, STEP_B0, STEP_FOREVER))
3034 /* Wa_14017856879 */
3035 wa_mcr_masked_en(wal, GEN9_ROW_CHICKEN3, MTL_DISABLE_FIX_FOR_EOT_FLUSH);
3036
3037 if (IS_MTL_GRAPHICS_STEP(i915, M, STEP_A0, STEP_B0) ||
3038 IS_MTL_GRAPHICS_STEP(i915, P, STEP_A0, STEP_B0))
3039 /*
3040 * Wa_14017066071
3041 * Wa_14017654203
3042 */
3043 wa_mcr_masked_en(wal, GEN10_SAMPLER_MODE,
3044 MTL_DISABLE_SAMPLER_SC_OOO);
3045
3046 if (IS_MTL_GRAPHICS_STEP(i915, P, STEP_A0, STEP_B0))
3047 /* Wa_22015279794 */
3048 wa_mcr_masked_en(wal, GEN10_CACHE_MODE_SS,
3049 DISABLE_PREFETCH_INTO_IC);
3050
3051 if (IS_MTL_GRAPHICS_STEP(i915, M, STEP_A0, STEP_B0) ||
3052 IS_MTL_GRAPHICS_STEP(i915, P, STEP_A0, STEP_B0) ||
3053 IS_DG2_GRAPHICS_STEP(i915, G10, STEP_B0, STEP_FOREVER) ||
3054 IS_DG2_G11(i915) || IS_DG2_G12(i915)) {
3055 /* Wa_22013037850 */
3056 wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0_UDW,
3057 DISABLE_128B_EVICTION_COMMAND_UDW);
3058 }
3059
3060 if (IS_MTL_GRAPHICS_STEP(i915, M, STEP_A0, STEP_B0) ||
3061 IS_MTL_GRAPHICS_STEP(i915, P, STEP_A0, STEP_B0) ||
3062 IS_PONTEVECCHIO(i915) ||
3063 IS_DG2(i915)) {
3064 /* Wa_22014226127 */
3065 wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0, DISABLE_D8_D16_COASLESCE);
3066 }
3067
3068 if (IS_MTL_GRAPHICS_STEP(i915, M, STEP_A0, STEP_B0) ||
3069 IS_MTL_GRAPHICS_STEP(i915, P, STEP_A0, STEP_B0) ||
3070 IS_DG2(i915)) {
3071 /* Wa_18017747507 */
3072 wa_masked_en(wal, VFG_PREEMPTION_CHICKEN, POLYGON_TRIFAN_LINELOOP_DISABLE);
3073 }
3074
3075 if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_B0, STEP_C0) ||
3076 IS_DG2_G11(i915)) {
3077 /*
3078 * Wa_22012826095:dg2
3079 * Wa_22013059131:dg2
3080 */
3081 wa_mcr_write_clr_set(wal, LSC_CHICKEN_BIT_0_UDW,
3082 MAXREQS_PER_BANK,
3083 REG_FIELD_PREP(MAXREQS_PER_BANK, 2));
3084
3085 /* Wa_22013059131:dg2 */
3086 wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0,
3087 FORCE_1_SUB_MESSAGE_PER_FRAGMENT);
3088 }
3089
3090 if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_A0, STEP_B0)) {
3091 /*
3092 * Wa_14010918519:dg2_g10
3093 *
3094 * LSC_CHICKEN_BIT_0 always reads back as 0 in this stepping,
3095 * so ignoring verification.
3096 */
3097 wa_mcr_add(wal, LSC_CHICKEN_BIT_0_UDW, 0,
3098 FORCE_SLM_FENCE_SCOPE_TO_TILE | FORCE_UGM_FENCE_SCOPE_TO_TILE,
3099 0, false);
3100 }
3101
3102 if (IS_XEHPSDV(i915)) {
3103 /* Wa_1409954639 */
3104 wa_mcr_masked_en(wal,
3105 GEN8_ROW_CHICKEN,
3106 SYSTOLIC_DOP_CLOCK_GATING_DIS);
3107
3108 /* Wa_1607196519 */
3109 wa_mcr_masked_en(wal,
3110 GEN9_ROW_CHICKEN4,
3111 GEN12_DISABLE_GRF_CLEAR);
3112
3113 /* Wa_14010449647:xehpsdv */
3114 wa_mcr_masked_en(wal, GEN8_HALF_SLICE_CHICKEN1,
3115 GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE);
3116 }
3117
3118 if (IS_DG2(i915) || IS_PONTEVECCHIO(i915)) {
3119 /* Wa_14015227452:dg2,pvc */
3120 wa_mcr_masked_en(wal, GEN9_ROW_CHICKEN4, XEHP_DIS_BBL_SYSPIPE);
3121
3122 /* Wa_16015675438:dg2,pvc */
3123 wa_masked_en(wal, FF_SLICE_CS_CHICKEN2, GEN12_PERF_FIX_BALANCING_CFE_DISABLE);
3124 }
3125
3126 if (IS_DG2(i915)) {
3127 /*
3128 * Wa_16011620976:dg2_g11
3129 * Wa_22015475538:dg2
3130 */
3131 wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0_UDW, DIS_CHAIN_2XSIMD8);
3132 }
3133
3134 if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_A0, STEP_C0) || IS_DG2_G11(i915))
3135 /*
3136 * Wa_22012654132
3137 *
3138 * Note that register 0xE420 is write-only and cannot be read
3139 * back for verification on DG2 (due to Wa_14012342262), so
3140 * we need to explicitly skip the readback.
3141 */
3142 wa_mcr_add(wal, GEN10_CACHE_MODE_SS, 0,
3143 _MASKED_BIT_ENABLE(ENABLE_PREFETCH_INTO_IC),
3144 0 /* write-only, so skip validation */,
3145 true);
3146 }
3147
3148 static void
3149 engine_init_workarounds(struct intel_engine_cs *engine, struct i915_wa_list *wal)
3150 {
3151 if (GRAPHICS_VER(engine->i915) < 4)
3152 return;
3153
3154 engine_fake_wa_init(engine, wal);
3155
3156 /*
3157 * These are common workarounds that just need to be applied
3158 * to a single RCS/CCS engine's workaround list since
3159 * they're reset as part of the general render domain reset.
3160 */
3161 if (engine->flags & I915_ENGINE_FIRST_RENDER_COMPUTE)
3162 general_render_compute_wa_init(engine, wal);
3163
3164 if (engine->class == COMPUTE_CLASS)
3165 ccs_engine_wa_init(engine, wal);
3166 else if (engine->class == RENDER_CLASS)
3167 rcs_engine_wa_init(engine, wal);
3168 else
3169 xcs_engine_wa_init(engine, wal);
3170 }
3171
3172 void intel_engine_init_workarounds(struct intel_engine_cs *engine)
3173 {
3174 struct i915_wa_list *wal = &engine->wa_list;
3175
3176 wa_init_start(wal, engine->gt, "engine", engine->name);
3177 engine_init_workarounds(engine, wal);
3178 wa_init_finish(wal);
3179 }
3180
3181 void intel_engine_apply_workarounds(struct intel_engine_cs *engine)
3182 {
3183 wa_list_apply(&engine->wa_list);
3184 }
3185
3186 static const struct i915_range mcr_ranges_gen8[] = {
3187 { .start = 0x5500, .end = 0x55ff },
3188 { .start = 0x7000, .end = 0x7fff },
3189 { .start = 0x9400, .end = 0x97ff },
3190 { .start = 0xb000, .end = 0xb3ff },
3191 { .start = 0xe000, .end = 0xe7ff },
3192 {},
3193 };
3194
3195 static const struct i915_range mcr_ranges_gen12[] = {
3196 { .start = 0x8150, .end = 0x815f },
3197 { .start = 0x9520, .end = 0x955f },
3198 { .start = 0xb100, .end = 0xb3ff },
3199 { .start = 0xde80, .end = 0xe8ff },
3200 { .start = 0x24a00, .end = 0x24a7f },
3201 {},
3202 };
3203
3204 static const struct i915_range mcr_ranges_xehp[] = {
3205 { .start = 0x4000, .end = 0x4aff },
3206 { .start = 0x5200, .end = 0x52ff },
3207 { .start = 0x5400, .end = 0x7fff },
3208 { .start = 0x8140, .end = 0x815f },
3209 { .start = 0x8c80, .end = 0x8dff },
3210 { .start = 0x94d0, .end = 0x955f },
3211 { .start = 0x9680, .end = 0x96ff },
3212 { .start = 0xb000, .end = 0xb3ff },
3213 { .start = 0xc800, .end = 0xcfff },
3214 { .start = 0xd800, .end = 0xd8ff },
3215 { .start = 0xdc00, .end = 0xffff },
3216 { .start = 0x17000, .end = 0x17fff },
3217 { .start = 0x24a00, .end = 0x24a7f },
3218 {},
3219 };
3220
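/*
 * Returns true if @offset falls within a multicast/replicated (MCR) register
 * range for this platform.
 */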
3221 static bool mcr_range(struct drm_i915_private *i915, u32 offset)
3222 {
3223 const struct i915_range *mcr_ranges;
3224 int i;
3225
3226 if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50))
3227 mcr_ranges = mcr_ranges_xehp;
3228 else if (GRAPHICS_VER(i915) >= 12)
3229 mcr_ranges = mcr_ranges_gen12;
3230 else if (GRAPHICS_VER(i915) >= 8)
3231 mcr_ranges = mcr_ranges_gen8;
3232 else
3233 return false;
3234
3235 /*
3236 * Registers in these ranges are affected by the MCR selector
3237 * which only controls CPU initiated MMIO. Routing does not
3238 * work for CS access so we cannot verify them on this path.
3239 */
3240 for (i = 0; mcr_ranges[i].start; i++)
3241 if (offset >= mcr_ranges[i].start &&
3242 offset <= mcr_ranges[i].end)
3243 return true;
3244
3245 return false;
3246 }
3247
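/*
 * Emit one MI_STORE_REGISTER_MEM per workaround register so the command
 * streamer dumps the current register values into the scratch buffer backing
 * @vma. Offsets in MCR ranges are skipped as they cannot be read back here.
 */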
static int
wa_list_srm(struct i915_request *rq,
	    const struct i915_wa_list *wal,
	    struct i915_vma *vma)
{
	struct drm_i915_private *i915 = rq->i915;
	unsigned int i, count = 0;
	const struct i915_wa *wa;
	u32 srm, *cs;

	srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
	/* On gen8+ the command takes a 64-bit address, so its length field is one larger */
	if (GRAPHICS_VER(i915) >= 8)
		srm++;

	for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
		if (!mcr_range(i915, i915_mmio_reg_offset(wa->reg)))
			count++;
	}

	cs = intel_ring_begin(rq, 4 * count);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
		u32 offset = i915_mmio_reg_offset(wa->reg);

		if (mcr_range(i915, offset))
			continue;

		*cs++ = srm;
		*cs++ = offset;
		*cs++ = i915_ggtt_offset(vma) + sizeof(u32) * i;
		*cs++ = 0;
	}
	intel_ring_advance(rq, cs);

	return 0;
}

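/*
 * Read the workaround registers back via a request on @ce and compare each
 * value against the expected one with wa_verify(). Registers in MCR ranges
 * are skipped; any mismatch makes the function return -ENXIO.
 */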
static int engine_wa_list_verify(struct intel_context *ce,
				 const struct i915_wa_list * const wal,
				 const char *from)
{
	const struct i915_wa *wa;
	struct i915_request *rq;
	struct i915_vma *vma;
	struct i915_gem_ww_ctx ww;
	unsigned int i;
	u32 *results;
	int err;

	if (!wal->count)
		return 0;

	vma = __vm_create_scratch_for_read(&ce->engine->gt->ggtt->vm,
					   wal->count * sizeof(u32));
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	intel_engine_pm_get(ce->engine);
	i915_gem_ww_ctx_init(&ww, false);
retry:
	err = i915_gem_object_lock(vma->obj, &ww);
	if (err == 0)
		err = intel_context_pin_ww(ce, &ww);
	if (err)
		goto err_pm;

	err = i915_vma_pin_ww(vma, &ww, 0, 0,
			      i915_vma_is_ggtt(vma) ? PIN_GLOBAL : PIN_USER);
	if (err)
		goto err_unpin;

	rq = i915_request_create(ce);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_vma;
	}

	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	if (err == 0)
		err = wa_list_srm(rq, wal, vma);

	i915_request_get(rq);
	if (err)
		i915_request_set_error_once(rq, err);
	i915_request_add(rq);

	if (err)
		goto err_rq;

	if (i915_request_wait(rq, 0, HZ / 5) < 0) {
		err = -ETIME;
		goto err_rq;
	}

	results = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
	if (IS_ERR(results)) {
		err = PTR_ERR(results);
		goto err_rq;
	}

	err = 0;
	for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
		if (mcr_range(rq->i915, i915_mmio_reg_offset(wa->reg)))
			continue;

		if (!wa_verify(wal->gt, wa, results[i], wal->name, from))
			err = -ENXIO;
	}

	i915_gem_object_unpin_map(vma->obj);

err_rq:
	i915_request_put(rq);
err_vma:
	i915_vma_unpin(vma);
err_unpin:
	intel_context_unpin(ce);
err_pm:
	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
	intel_engine_pm_put(ce->engine);
	i915_vma_put(vma);
	return err;
}

int intel_engine_verify_workarounds(struct intel_engine_cs *engine,
				    const char *from)
{
	return engine_wa_list_verify(engine->kernel_context,
				     &engine->wa_list,
				     from);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_workarounds.c"
#endif