// SPDX-License-Identifier: MIT
/*
 * Copyright © 2014-2018 Intel Corporation
 */

#include "i915_drv.h"
#include "intel_context.h"
#include "intel_engine_pm.h"
#include "intel_engine_regs.h"
#include "intel_gpu_commands.h"
#include "intel_gt.h"
#include "intel_gt_regs.h"
#include "intel_ring.h"
#include "intel_workarounds.h"
/**
 * DOC: Hardware workarounds
 *
 * This file is intended as a central place to implement most [1]_ of the
 * required workarounds for hardware to work as originally intended. They fall
 * into five basic categories depending on how/when they are applied:
 *
 * - Workarounds that touch registers that are saved/restored to/from the HW
 *   context image. The list is emitted (via Load Register Immediate commands)
 *   every time a new context is created.
 * - GT workarounds. The list of these WAs is applied whenever these registers
 *   revert to their default values (on GPU reset, suspend/resume [2]_, etc.).
 * - Display workarounds. The list is applied during display clock-gating
 *   initialization.
 * - Workarounds that whitelist a privileged register, so that UMDs can manage
 *   them directly. This is just a special case of an MMIO workaround (as we
 *   write the list of to-be-whitelisted registers to some special HW
 *   registers).
 * - Workaround batchbuffers, that get executed automatically by the hardware
 *   on every HW context restore.
 *
 * .. [1] Please notice that there are other WAs that, due to their nature,
 *    cannot be applied from a central place. Those are peppered around the rest
 *    of the code, as needed.
 *
 * .. [2] Technically, some registers are power-context saved & restored, so they
 *    survive a suspend/resume. In practice, writing them again is not too
 *    costly and simplifies things. We can revisit this in the future.
 *
 * Layout
 * ~~~~~~
 *
 * Keep things in this file ordered by WA type, as per the above (context, GT,
 * display, register whitelist, batchbuffer). Then, inside each type, keep the
 * following order:
 *
 * - Infrastructure functions and macros
 * - WAs per platform in standard gen/chrono order
 * - Public functions to init or apply the given workaround type.
 */

static void wa_init_start(struct i915_wa_list *wal, const char *name, const char *engine_name)
{
        wal->name = name;
        wal->engine_name = engine_name;
}

#define WA_LIST_CHUNK (1 << 4)

static void wa_init_finish(struct i915_wa_list *wal)
{
        /* Trim unused entries. */
        if (!IS_ALIGNED(wal->count, WA_LIST_CHUNK)) {
                struct i915_wa *list = kmemdup(wal->list,
                                               wal->count * sizeof(*list),
                                               GFP_KERNEL);

                if (list) {
                        kfree(wal->list);
                        wal->list = list;
                }
        }

        if (!wal->count)
                return;

        DRM_DEBUG_DRIVER("Initialized %u %s workarounds on %s\n",
                         wal->wa_count, wal->name, wal->engine_name);
}

static void _wa_add(struct i915_wa_list *wal, const struct i915_wa *wa)
{
        unsigned int addr = i915_mmio_reg_offset(wa->reg);
        unsigned int start = 0, end = wal->count;
        const unsigned int grow = WA_LIST_CHUNK;
        struct i915_wa *wa_;

        GEM_BUG_ON(!is_power_of_2(grow));

        if (IS_ALIGNED(wal->count, grow)) { /* Either uninitialized or full. */
                struct i915_wa *list;

                list = kmalloc_array(ALIGN(wal->count + 1, grow), sizeof(*wa),
                                     GFP_KERNEL);
                if (!list) {
                        DRM_ERROR("No space for workaround init!\n");
                        return;
                }

                if (wal->list) {
                        memcpy(list, wal->list, sizeof(*wa) * wal->count);
                        kfree(wal->list);
                }

                wal->list = list;
        }

        while (start < end) {
                unsigned int mid = start + (end - start) / 2;

                if (i915_mmio_reg_offset(wal->list[mid].reg) < addr) {
                        start = mid + 1;
                } else if (i915_mmio_reg_offset(wal->list[mid].reg) > addr) {
                        end = mid;
                } else {
                        wa_ = &wal->list[mid];

                        if ((wa->clr | wa_->clr) && !(wa->clr & ~wa_->clr)) {
                                DRM_ERROR("Discarding overwritten w/a for reg %04x (clear: %08x, set: %08x)\n",
                                          i915_mmio_reg_offset(wa_->reg),
                                          wa_->clr, wa_->set);

                                wa_->set &= ~wa->clr;
                        }

                        wal->wa_count++;
                        wa_->set |= wa->set;
                        wa_->clr |= wa->clr;
                        wa_->read |= wa->read;
                        return;
                }
        }

        wal->wa_count++;
        wa_ = &wal->list[wal->count++];
        *wa_ = *wa;

        while (wa_-- > wal->list) {
                GEM_BUG_ON(i915_mmio_reg_offset(wa_[0].reg) ==
                           i915_mmio_reg_offset(wa_[1].reg));
                if (i915_mmio_reg_offset(wa_[1].reg) >
                    i915_mmio_reg_offset(wa_[0].reg))
                        break;

                swap(wa_[1], wa_[0]);
        }
}
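
/*
 * Illustrative sketch (not from bspec): _wa_add() keeps wal->list sorted by
 * mmio offset and merges entries that target the same register. For example,
 * under these assumed values, adding {reg = X, clr = 0x0f, set = 0x01} on top
 * of an existing {reg = X, clr = 0xff, set = 0x0f} first drops the old bits
 * covered by the new clear mask (set becomes 0x00), then ORs in the new
 * masks, leaving {clr = 0xff, set = 0x01} plus an error log noting that an
 * earlier w/a for the register was overwritten.
 */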

static void wa_add(struct i915_wa_list *wal, i915_reg_t reg,
                   u32 clear, u32 set, u32 read_mask, bool masked_reg)
{
        struct i915_wa wa = {
                .reg = reg,
                .clr = clear,
                .set = set,
                .read = read_mask,
                .masked_reg = masked_reg,
        };

        _wa_add(wal, &wa);
}

static void
wa_write_clr_set(struct i915_wa_list *wal, i915_reg_t reg, u32 clear, u32 set)
{
        wa_add(wal, reg, clear, set, clear, false);
}

static void
wa_write(struct i915_wa_list *wal, i915_reg_t reg, u32 set)
{
        wa_write_clr_set(wal, reg, ~0, set);
}

static void
wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 set)
{
        wa_write_clr_set(wal, reg, set, set);
}

static void
wa_write_clr(struct i915_wa_list *wal, i915_reg_t reg, u32 clr)
{
        wa_write_clr_set(wal, reg, clr, 0);
}
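
/*
 * A worked example, assuming a register whose current value is 0x0000ff00:
 * wa_list_apply() performs "val = (old & ~clr) | set" for each entry, so
 * wa_write_or(wal, reg, 0x1) yields 0x0000ff01 (other bits preserved, since
 * clr == set), wa_write_clr(wal, reg, 0xff00) yields 0x00000000, and
 * wa_write(wal, reg, 0x1) yields 0x00000001 (clr == ~0 overwrites the whole
 * register). Note the clear mask doubles as the read-back verification mask.
 */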

/*
 * WA operations on "masked register". A masked register has the upper 16 bits
 * documented as "masked" in b-spec. Its purpose is to allow writing to just a
 * portion of the register without an rmw: you simply write in the upper 16 bits
 * the mask of the bits you are going to modify.
 *
 * The wa_masked_* family of functions already does the necessary operations to
 * calculate the mask based on the parameters passed, so the user only has to
 * provide the lower 16 bits of that register.
 */
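
/*
 * Sketch of the mechanics, assuming bit 2 is the bit of interest:
 * _MASKED_BIT_ENABLE(BIT(2)) expands to (BIT(2) << 16) | BIT(2) == 0x00040004,
 * so the write asserts only bit 2 and the hardware leaves every other bit of
 * the register untouched; _MASKED_BIT_DISABLE(BIT(2)) == 0x00040000 deasserts
 * it the same way. This is why wa_masked_en()/wa_masked_dis() pass clr == 0:
 * no read-modify-write is needed on a masked register.
 */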

static void
wa_masked_en(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
{
        wa_add(wal, reg, 0, _MASKED_BIT_ENABLE(val), val, true);
}

static void
wa_masked_dis(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
{
        wa_add(wal, reg, 0, _MASKED_BIT_DISABLE(val), val, true);
}

static void
wa_masked_field_set(struct i915_wa_list *wal, i915_reg_t reg,
                    u32 mask, u32 val)
{
        wa_add(wal, reg, 0, _MASKED_FIELD(mask, val), mask, true);
}

static void gen6_ctx_workarounds_init(struct intel_engine_cs *engine,
                                      struct i915_wa_list *wal)
{
        wa_masked_en(wal, INSTPM, INSTPM_FORCE_ORDERING);
}

static void gen7_ctx_workarounds_init(struct intel_engine_cs *engine,
                                      struct i915_wa_list *wal)
{
        wa_masked_en(wal, INSTPM, INSTPM_FORCE_ORDERING);
}

static void gen8_ctx_workarounds_init(struct intel_engine_cs *engine,
                                      struct i915_wa_list *wal)
{
        wa_masked_en(wal, INSTPM, INSTPM_FORCE_ORDERING);

        /* WaDisableAsyncFlipPerfMode:bdw,chv */
        wa_masked_en(wal, RING_MI_MODE(RENDER_RING_BASE), ASYNC_FLIP_PERF_DISABLE);

        /* WaDisablePartialInstShootdown:bdw,chv */
        wa_masked_en(wal, GEN8_ROW_CHICKEN,
                     PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);

        /* Use Force Non-Coherent whenever executing a 3D context. This is a
         * workaround for a possible hang in the unlikely event a TLB
         * invalidation occurs during a PSD flush.
         */
        /* WaForceEnableNonCoherent:bdw,chv */
        /* WaHdcDisableFetchWhenMasked:bdw,chv */
        wa_masked_en(wal, HDC_CHICKEN0,
                     HDC_DONOT_FETCH_MEM_WHEN_MASKED |
                     HDC_FORCE_NON_COHERENT);

        /* From the Haswell PRM, Command Reference: Registers, CACHE_MODE_0:
         * "The Hierarchical Z RAW Stall Optimization allows non-overlapping
         * polygons in the same 8x4 pixel/sample area to be processed without
         * stalling waiting for the earlier ones to write to Hierarchical Z
         * buffer."
         *
         * This optimization is off by default for BDW and CHV; turn it on.
         */
        wa_masked_dis(wal, CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);

        /* Wa4x4STCOptimizationDisable:bdw,chv */
        wa_masked_en(wal, CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE);

        /*
         * BSpec recommends 8x4 when MSAA is used,
         * however in practice 16x4 seems fastest.
         *
         * Note that PS/WM thread counts depend on the WIZ hashing
         * disable bit, which we don't touch here, but it's good
         * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
         */
        wa_masked_field_set(wal, GEN7_GT_MODE,
                            GEN6_WIZ_HASHING_MASK,
                            GEN6_WIZ_HASHING_16x4);
}

static void bdw_ctx_workarounds_init(struct intel_engine_cs *engine,
                                     struct i915_wa_list *wal)
{
        struct drm_i915_private *i915 = engine->i915;

        gen8_ctx_workarounds_init(engine, wal);

        /* WaDisableThreadStallDopClockGating:bdw (pre-production) */
        wa_masked_en(wal, GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);

        /* WaDisableDopClockGating:bdw
         *
         * Also see the related UCGTCL1 write in bdw_init_clock_gating()
         * to disable EUTC clock gating.
         */
        wa_masked_en(wal, GEN7_ROW_CHICKEN2,
                     DOP_CLOCK_GATING_DISABLE);

        wa_masked_en(wal, HALF_SLICE_CHICKEN3,
                     GEN8_SAMPLER_POWER_BYPASS_DIS);

        wa_masked_en(wal, HDC_CHICKEN0,
                     /* WaForceContextSaveRestoreNonCoherent:bdw */
                     HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
                     /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
                     (IS_BDW_GT3(i915) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
}

static void chv_ctx_workarounds_init(struct intel_engine_cs *engine,
                                     struct i915_wa_list *wal)
{
        gen8_ctx_workarounds_init(engine, wal);

        /* WaDisableThreadStallDopClockGating:chv */
        wa_masked_en(wal, GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);

        /* Improve HiZ throughput on CHV. */
        wa_masked_en(wal, HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);
}

static void gen9_ctx_workarounds_init(struct intel_engine_cs *engine,
                                      struct i915_wa_list *wal)
{
        struct drm_i915_private *i915 = engine->i915;

        if (HAS_LLC(i915)) {
                /* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl
                 *
                 * Must match Display Engine. See
                 * WaCompressedResourceDisplayNewHashMode.
                 */
                wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
                             GEN9_PBE_COMPRESSED_HASH_SELECTION);
                wa_masked_en(wal, GEN9_HALF_SLICE_CHICKEN7,
                             GEN9_SAMPLER_HASH_COMPRESSED_READ_ADDR);
        }

        /* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl,glk,cfl */
        /* WaDisablePartialInstShootdown:skl,bxt,kbl,glk,cfl */
        wa_masked_en(wal, GEN8_ROW_CHICKEN,
                     FLOW_CONTROL_ENABLE |
                     PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);

        /* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl,glk,cfl */
        /* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl,cfl */
        wa_masked_en(wal, GEN9_HALF_SLICE_CHICKEN7,
                     GEN9_ENABLE_YV12_BUGFIX |
                     GEN9_ENABLE_GPGPU_PREEMPTION);

        /* Wa4x4STCOptimizationDisable:skl,bxt,kbl,glk,cfl */
        /* WaDisablePartialResolveInVc:skl,bxt,kbl,cfl */
        wa_masked_en(wal, CACHE_MODE_1,
                     GEN8_4x4_STC_OPTIMIZATION_DISABLE |
                     GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE);

        /* WaCcsTlbPrefetchDisable:skl,bxt,kbl,glk,cfl */
        wa_masked_dis(wal, GEN9_HALF_SLICE_CHICKEN5,
                      GEN9_CCS_TLB_PREFETCH_ENABLE);

        /* WaForceContextSaveRestoreNonCoherent:skl,bxt,kbl,cfl */
        wa_masked_en(wal, HDC_CHICKEN0,
                     HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
                     HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE);

        /* WaForceEnableNonCoherent and WaDisableHDCInvalidation are
         * both tied to WaForceContextSaveRestoreNonCoherent
         * in some hsds for skl. We keep the tie for all gen9. The
         * documentation is a bit hazy and so we want to get common behaviour,
         * even though there is no clear evidence we would need both on kbl/bxt.
         * This area has been a source of system hangs, so we play it safe
         * and mimic the skl regardless of what bspec says.
         *
         * Use Force Non-Coherent whenever executing a 3D context. This
         * is a workaround for a possible hang in the unlikely event
         * a TLB invalidation occurs during a PSD flush.
         */

        /* WaForceEnableNonCoherent:skl,bxt,kbl,cfl */
        wa_masked_en(wal, HDC_CHICKEN0,
                     HDC_FORCE_NON_COHERENT);

        /* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl,cfl */
        if (IS_SKYLAKE(i915) ||
            IS_KABYLAKE(i915) ||
            IS_COFFEELAKE(i915) ||
            IS_COMETLAKE(i915))
                wa_masked_en(wal, HALF_SLICE_CHICKEN3,
                             GEN8_SAMPLER_POWER_BYPASS_DIS);

        /* WaDisableSTUnitPowerOptimization:skl,bxt,kbl,glk,cfl */
        wa_masked_en(wal, HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);

        /*
         * Supporting preemption with fine-granularity requires changes in the
         * batch buffer programming. Since we can't break old userspace, we
         * need to set our default preemption level to a safe value. Userspace
         * is still able to use more fine-grained preemption levels, since in
         * WaEnablePreemptionGranularityControlByUMD we're whitelisting the
         * per-ctx register. As such, WaDisable{3D,GPGPU}MidCmdPreemption are
         * not real HW workarounds, but merely a way to start using preemption
         * while maintaining the old contract with userspace.
         */

        /* WaDisable3DMidCmdPreemption:skl,bxt,glk,cfl,[cnl] */
        wa_masked_dis(wal, GEN8_CS_CHICKEN1, GEN9_PREEMPT_3D_OBJECT_LEVEL);

        /* WaDisableGPGPUMidCmdPreemption:skl,bxt,glk,cfl,[cnl] */
        wa_masked_field_set(wal, GEN8_CS_CHICKEN1,
                            GEN9_PREEMPT_GPGPU_LEVEL_MASK,
                            GEN9_PREEMPT_GPGPU_COMMAND_LEVEL);

        /* WaClearHIZ_WM_CHICKEN3:bxt,glk */
        if (IS_GEN9_LP(i915))
                wa_masked_en(wal, GEN9_WM_CHICKEN3, GEN9_FACTOR_IN_CLR_VAL_HIZ);
}

static void skl_tune_iz_hashing(struct intel_engine_cs *engine,
                                struct i915_wa_list *wal)
{
        struct intel_gt *gt = engine->gt;
        u8 vals[3] = { 0, 0, 0 };
        unsigned int i;

        for (i = 0; i < 3; i++) {
                u8 ss;

                /*
                 * Only consider slices where one, and only one, subslice has 7
                 * EUs
                 */
                if (!is_power_of_2(gt->info.sseu.subslice_7eu[i]))
                        continue;

                /*
                 * subslice_7eu[i] != 0 (because of the check above) and
                 * ss_max == 4 (maximum number of subslices possible per slice)
                 *
                 * -> 0 <= ss <= 3;
                 */
                ss = ffs(gt->info.sseu.subslice_7eu[i]) - 1;
                vals[i] = 3 - ss;
        }

        if (vals[0] == 0 && vals[1] == 0 && vals[2] == 0)
                return;

        /* Tune IZ hashing. See intel_device_info_runtime_init() */
        wa_masked_field_set(wal, GEN7_GT_MODE,
                            GEN9_IZ_HASHING_MASK(2) |
                            GEN9_IZ_HASHING_MASK(1) |
                            GEN9_IZ_HASHING_MASK(0),
                            GEN9_IZ_HASHING(2, vals[2]) |
                            GEN9_IZ_HASHING(1, vals[1]) |
                            GEN9_IZ_HASHING(0, vals[0]));
}
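
/*
 * A worked example of the math above, under assumed fuse values: if slice 0
 * reports subslice_7eu[0] == 0b0100, exactly one subslice (ss == 2) has 7 EUs,
 * so vals[0] == 3 - 2 == 1; a slice with zero or several 7-EU subslices keeps
 * vals[i] == 0 and leaves the hardware default hashing for that slice.
 */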

static void skl_ctx_workarounds_init(struct intel_engine_cs *engine,
                                     struct i915_wa_list *wal)
{
        gen9_ctx_workarounds_init(engine, wal);
        skl_tune_iz_hashing(engine, wal);
}

static void bxt_ctx_workarounds_init(struct intel_engine_cs *engine,
                                     struct i915_wa_list *wal)
{
        gen9_ctx_workarounds_init(engine, wal);

        /* WaDisableThreadStallDopClockGating:bxt */
        wa_masked_en(wal, GEN8_ROW_CHICKEN,
                     STALL_DOP_GATING_DISABLE);

        /* WaToEnableHwFixForPushConstHWBug:bxt */
        wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
                     GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
}

static void kbl_ctx_workarounds_init(struct intel_engine_cs *engine,
                                     struct i915_wa_list *wal)
{
        struct drm_i915_private *i915 = engine->i915;

        gen9_ctx_workarounds_init(engine, wal);

        /* WaToEnableHwFixForPushConstHWBug:kbl */
        if (IS_KBL_GRAPHICS_STEP(i915, STEP_C0, STEP_FOREVER))
                wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
                             GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);

        /* WaDisableSbeCacheDispatchPortSharing:kbl */
        wa_masked_en(wal, GEN7_HALF_SLICE_CHICKEN1,
                     GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
}

static void glk_ctx_workarounds_init(struct intel_engine_cs *engine,
                                     struct i915_wa_list *wal)
{
        gen9_ctx_workarounds_init(engine, wal);

        /* WaToEnableHwFixForPushConstHWBug:glk */
        wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
                     GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
}

static void cfl_ctx_workarounds_init(struct intel_engine_cs *engine,
                                     struct i915_wa_list *wal)
{
        gen9_ctx_workarounds_init(engine, wal);

        /* WaToEnableHwFixForPushConstHWBug:cfl */
        wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
                     GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);

        /* WaDisableSbeCacheDispatchPortSharing:cfl */
        wa_masked_en(wal, GEN7_HALF_SLICE_CHICKEN1,
                     GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
}

static void icl_ctx_workarounds_init(struct intel_engine_cs *engine,
                                     struct i915_wa_list *wal)
{
        /* Wa_1406697149 (WaDisableBankHangMode:icl) */
        wa_write(wal,
                 GEN8_L3CNTLREG,
                 intel_uncore_read(engine->uncore, GEN8_L3CNTLREG) |
                 GEN8_ERRDETBCTRL);

        /* WaForceEnableNonCoherent:icl
         * This is not the same workaround as in early Gen9 platforms, where
         * lacking this could cause system hangs, but coherency performance
         * overhead is high and only a few compute workloads really need it
         * (the register is whitelisted in hardware now, so UMDs can opt in
         * for coherency if they have a good reason).
         */
        wa_masked_en(wal, ICL_HDC_MODE, HDC_FORCE_NON_COHERENT);

        /* WaEnableFloatBlendOptimization:icl */
        wa_add(wal, GEN10_CACHE_MODE_SS, 0,
               _MASKED_BIT_ENABLE(FLOAT_BLEND_OPTIMIZATION_ENABLE),
               0 /* write-only, so skip validation */,
               true);

        /* WaDisableGPGPUMidThreadPreemption:icl */
        wa_masked_field_set(wal, GEN8_CS_CHICKEN1,
                            GEN9_PREEMPT_GPGPU_LEVEL_MASK,
                            GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL);

        /* allow headerless messages for preemptible GPGPU context */
        wa_masked_en(wal, GEN10_SAMPLER_MODE,
                     GEN11_SAMPLER_ENABLE_HEADLESS_MSG);

        /* Wa_1604278689:icl,ehl */
        wa_write(wal, IVB_FBC_RT_BASE, 0xFFFFFFFF & ~ILK_FBC_RT_VALID);
        wa_write_clr_set(wal, IVB_FBC_RT_BASE_UPPER,
                         0, /* write-only register; skip validation */
                         0xFFFFFFFF);

        /* Wa_1406306137:icl,ehl */
        wa_masked_en(wal, GEN9_ROW_CHICKEN4, GEN11_DIS_PICK_2ND_EU);
}

/*
 * These settings aren't actually workarounds, but general tuning settings
 * that need to be programmed on the DG2 platform.
 */
static void dg2_ctx_gt_tuning_init(struct intel_engine_cs *engine,
                                   struct i915_wa_list *wal)
{
        wa_write_clr_set(wal, GEN11_L3SQCREG5, L3_PWM_TIMER_INIT_VAL_MASK,
                         REG_FIELD_PREP(L3_PWM_TIMER_INIT_VAL_MASK, 0x7f));
        wa_add(wal,
               FF_MODE2,
               FF_MODE2_TDS_TIMER_MASK,
               FF_MODE2_TDS_TIMER_128,
               0, false);
}

/*
 * These settings aren't actually workarounds, but general tuning settings that
 * need to be programmed on several platforms.
 */
static void gen12_ctx_gt_tuning_init(struct intel_engine_cs *engine,
                                     struct i915_wa_list *wal)
{
        /*
         * Although some platforms refer to it as Wa_1604555607, we need to
         * program it even on those that don't explicitly list that
         * workaround.
         *
         * Note that the programming of this register is further modified
         * according to the FF_MODE2 guidance given by Wa_1608008084:gen12.
         * Wa_1608008084 tells us the FF_MODE2 register will return the wrong
         * value when read. The default value for this register is zero for all
         * fields and there are no bit masks. So instead of doing an RMW we
         * should just write the TDS timer value. For the same reason read
         * verification is ignored.
         */
        wa_add(wal,
               FF_MODE2,
               FF_MODE2_TDS_TIMER_MASK,
               FF_MODE2_TDS_TIMER_128,
               0, false);
}

static void gen12_ctx_workarounds_init(struct intel_engine_cs *engine,
                                       struct i915_wa_list *wal)
{
        gen12_ctx_gt_tuning_init(engine, wal);

        /*
         * Wa_1409142259:tgl,dg1,adl-p
         * Wa_1409347922:tgl,dg1,adl-p
         * Wa_1409252684:tgl,dg1,adl-p
         * Wa_1409217633:tgl,dg1,adl-p
         * Wa_1409207793:tgl,dg1,adl-p
         * Wa_1409178076:tgl,dg1,adl-p
         * Wa_1408979724:tgl,dg1,adl-p
         * Wa_14010443199:tgl,rkl,dg1,adl-p
         * Wa_14010698770:tgl,rkl,dg1,adl-s,adl-p
         * Wa_1409342910:tgl,rkl,dg1,adl-s,adl-p
         */
        wa_masked_en(wal, GEN11_COMMON_SLICE_CHICKEN3,
                     GEN12_DISABLE_CPS_AWARE_COLOR_PIPE);

        /* WaDisableGPGPUMidThreadPreemption:gen12 */
        wa_masked_field_set(wal, GEN8_CS_CHICKEN1,
                            GEN9_PREEMPT_GPGPU_LEVEL_MASK,
                            GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL);

        /*
         * Wa_16011163337
         *
         * Like in gen12_ctx_gt_tuning_init(), read verification is ignored due
         * to Wa_1608008084.
         */
        wa_add(wal,
               FF_MODE2,
               FF_MODE2_GS_TIMER_MASK,
               FF_MODE2_GS_TIMER_224,
               0, false);
}

static void dg1_ctx_workarounds_init(struct intel_engine_cs *engine,
                                     struct i915_wa_list *wal)
{
        gen12_ctx_workarounds_init(engine, wal);

        /* Wa_1409044764 */
        wa_masked_dis(wal, GEN11_COMMON_SLICE_CHICKEN3,
                      DG1_FLOAT_POINT_BLEND_OPT_STRICT_MODE_EN);

        /* Wa_22010493298 */
        wa_masked_en(wal, HIZ_CHICKEN,
                     DG1_HZ_READ_SUPPRESSION_OPTIMIZATION_DISABLE);
}

static void dg2_ctx_workarounds_init(struct intel_engine_cs *engine,
                                     struct i915_wa_list *wal)
{
        dg2_ctx_gt_tuning_init(engine, wal);

        /* Wa_16011186671:dg2_g11 */
        if (IS_DG2_GRAPHICS_STEP(engine->i915, G11, STEP_A0, STEP_B0)) {
                wa_masked_dis(wal, VFLSKPD, DIS_MULT_MISS_RD_SQUASH);
                wa_masked_en(wal, VFLSKPD, DIS_OVER_FETCH_CACHE);
        }

        if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_A0, STEP_B0)) {
                /* Wa_14010469329:dg2_g10 */
                wa_masked_en(wal, GEN11_COMMON_SLICE_CHICKEN3,
                             XEHP_DUAL_SIMD8_SEQ_MERGE_DISABLE);

                /*
                 * Wa_22010465075:dg2_g10
                 * Wa_22010613112:dg2_g10
                 * Wa_14010698770:dg2_g10
                 */
                wa_masked_en(wal, GEN11_COMMON_SLICE_CHICKEN3,
                             GEN12_DISABLE_CPS_AWARE_COLOR_PIPE);
        }

        /* Wa_16013271637:dg2 */
        wa_masked_en(wal, SLICE_COMMON_ECO_CHICKEN1,
                     MSC_MSAA_REODER_BUF_BYPASS_DISABLE);

        /* Wa_14014947963:dg2 */
        if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_B0, STEP_FOREVER) ||
            IS_DG2_G11(engine->i915) || IS_DG2_G12(engine->i915))
                wa_masked_field_set(wal, VF_PREEMPTION, PREEMPTION_VERTEX_COUNT, 0x4000);
}

static void fakewa_disable_nestedbb_mode(struct intel_engine_cs *engine,
                                         struct i915_wa_list *wal)
{
        /*
         * This is a "fake" workaround defined by software to ensure we
         * maintain reliable, backward-compatible behavior for userspace with
         * regards to how nested MI_BATCH_BUFFER_START commands are handled.
         *
         * The per-context setting of MI_MODE[12] determines whether the bits
         * of a nested MI_BATCH_BUFFER_START instruction should be interpreted
         * in the traditional manner or whether they should instead use a new
         * tgl+ meaning that breaks backward compatibility, but allows nesting
         * into 3rd-level batchbuffers. When this new capability was first
         * added in TGL, it remained off by default unless a context
         * intentionally opted in to the new behavior. However Xe_HPG now
         * flips this on by default and requires that we explicitly opt out if
         * we don't want the new behavior.
         *
         * From a SW perspective, we want to maintain the backward-compatible
         * behavior for userspace, so we'll apply a fake workaround to set it
         * back to the legacy behavior on platforms where the hardware default
         * is to break compatibility. At the moment there is no Linux
         * userspace that utilizes third-level batchbuffers, so using the
         * legacy meaning is the correct thing to do and avoids userspace
         * needing to make any changes. If/when we have userspace consumers
         * that want to utilize third-level batch nesting, we can provide a
         * context parameter to allow them to opt in.
         */
        wa_masked_dis(wal, RING_MI_MODE(engine->mmio_base), TGL_NESTED_BB_EN);
}

static void gen12_ctx_gt_mocs_init(struct intel_engine_cs *engine,
                                   struct i915_wa_list *wal)
{
        u8 mocs;

        /*
         * Some blitter commands do not have a field for MOCS; those
         * commands will use the MOCS index pointed to by BLIT_CCTL.
         * The BLIT_CCTL registers need to be programmed to un-cached.
         */
        if (engine->class == COPY_ENGINE_CLASS) {
                mocs = engine->gt->mocs.uc_index;
                wa_write_clr_set(wal,
                                 BLIT_CCTL(engine->mmio_base),
                                 BLIT_CCTL_MASK,
                                 BLIT_CCTL_MOCS(mocs, mocs));
        }
}

/*
 * gen12_ctx_gt_fake_wa_init() doesn't program an official workaround
 * defined by the hardware team, but rather general context registers.
 * Adding this context register programming to the context workaround list
 * allows us to use the wa framework for proper application and validation.
 */
static void
gen12_ctx_gt_fake_wa_init(struct intel_engine_cs *engine,
                          struct i915_wa_list *wal)
{
        if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55))
                fakewa_disable_nestedbb_mode(engine, wal);

        gen12_ctx_gt_mocs_init(engine, wal);
}

static void
__intel_engine_init_ctx_wa(struct intel_engine_cs *engine,
                           struct i915_wa_list *wal,
                           const char *name)
{
        struct drm_i915_private *i915 = engine->i915;

        wa_init_start(wal, name, engine->name);

        /* Applies to all engines */
        /*
         * Fake workarounds are not actual workarounds, but the programming
         * of context registers using the workaround framework.
         */
        if (GRAPHICS_VER(i915) >= 12)
                gen12_ctx_gt_fake_wa_init(engine, wal);

        if (engine->class != RENDER_CLASS)
                goto done;

        if (IS_DG2(i915))
                dg2_ctx_workarounds_init(engine, wal);
        else if (IS_XEHPSDV(i915))
                ; /* noop; none at this time */
        else if (IS_DG1(i915))
                dg1_ctx_workarounds_init(engine, wal);
        else if (GRAPHICS_VER(i915) == 12)
                gen12_ctx_workarounds_init(engine, wal);
        else if (GRAPHICS_VER(i915) == 11)
                icl_ctx_workarounds_init(engine, wal);
        else if (IS_COFFEELAKE(i915) || IS_COMETLAKE(i915))
                cfl_ctx_workarounds_init(engine, wal);
        else if (IS_GEMINILAKE(i915))
                glk_ctx_workarounds_init(engine, wal);
        else if (IS_KABYLAKE(i915))
                kbl_ctx_workarounds_init(engine, wal);
        else if (IS_BROXTON(i915))
                bxt_ctx_workarounds_init(engine, wal);
        else if (IS_SKYLAKE(i915))
                skl_ctx_workarounds_init(engine, wal);
        else if (IS_CHERRYVIEW(i915))
                chv_ctx_workarounds_init(engine, wal);
        else if (IS_BROADWELL(i915))
                bdw_ctx_workarounds_init(engine, wal);
        else if (GRAPHICS_VER(i915) == 7)
                gen7_ctx_workarounds_init(engine, wal);
        else if (GRAPHICS_VER(i915) == 6)
                gen6_ctx_workarounds_init(engine, wal);
        else if (GRAPHICS_VER(i915) < 8)
                ;
        else
                MISSING_CASE(GRAPHICS_VER(i915));

done:
        wa_init_finish(wal);
}

void intel_engine_init_ctx_wa(struct intel_engine_cs *engine)
{
        __intel_engine_init_ctx_wa(engine, &engine->ctx_wa_list, "context");
}

int intel_engine_emit_ctx_wa(struct i915_request *rq)
{
        struct i915_wa_list *wal = &rq->engine->ctx_wa_list;
        struct i915_wa *wa;
        unsigned int i;
        u32 *cs;
        int ret;

        if (wal->count == 0)
                return 0;

        ret = rq->engine->emit_flush(rq, EMIT_BARRIER);
        if (ret)
                return ret;

        cs = intel_ring_begin(rq, (wal->count * 2 + 2));
        if (IS_ERR(cs))
                return PTR_ERR(cs);

        *cs++ = MI_LOAD_REGISTER_IMM(wal->count);
        for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
                *cs++ = i915_mmio_reg_offset(wa->reg);
                *cs++ = wa->set;
        }
        *cs++ = MI_NOOP;

        intel_ring_advance(rq, cs);

        ret = rq->engine->emit_flush(rq, EMIT_BARRIER);
        if (ret)
                return ret;

        return 0;
}
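
/*
 * Sketch of the command stream emitted above for an assumed two-entry list:
 * { LRI(2), reg0, val0, reg1, val1, MI_NOOP }, i.e. 2 * count + 2 dwords --
 * one header, a (register offset, value) pair per workaround, and a trailing
 * MI_NOOP that pads the dword count to an even total, all bracketed by
 * EMIT_BARRIER flushes so the writes are not reordered against the
 * surrounding batch.
 */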

static void
gen4_gt_workarounds_init(struct intel_gt *gt,
                         struct i915_wa_list *wal)
{
        /* WaDisable_RenderCache_OperationalFlush:gen4,ilk */
        wa_masked_dis(wal, CACHE_MODE_0, RC_OP_FLUSH_ENABLE);
}

static void
g4x_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
        gen4_gt_workarounds_init(gt, wal);

        /* WaDisableRenderCachePipelinedFlush:g4x,ilk */
        wa_masked_en(wal, CACHE_MODE_0, CM0_PIPELINED_RENDER_FLUSH_DISABLE);
}

static void
ilk_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
        g4x_gt_workarounds_init(gt, wal);

        wa_masked_en(wal, _3D_CHICKEN2, _3D_CHICKEN2_WM_READ_PIPELINED);
}

static void
snb_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
}

static void
ivb_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
        /* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
        wa_masked_dis(wal,
                      GEN7_COMMON_SLICE_CHICKEN1,
                      GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);

        /* WaApplyL3ControlAndL3ChickenMode:ivb */
        wa_write(wal, GEN7_L3CNTLREG1, GEN7_WA_FOR_GEN7_L3_CONTROL);
        wa_write(wal, GEN7_L3_CHICKEN_MODE_REGISTER, GEN7_WA_L3_CHICKEN_MODE);

        /* WaForceL3Serialization:ivb */
        wa_write_clr(wal, GEN7_L3SQCREG4, L3SQ_URB_READ_CAM_MATCH_DISABLE);
}

static void
vlv_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
        /* WaForceL3Serialization:vlv */
        wa_write_clr(wal, GEN7_L3SQCREG4, L3SQ_URB_READ_CAM_MATCH_DISABLE);

        /*
         * WaIncreaseL3CreditsForVLVB0:vlv
         * This is the hardware default actually.
         */
        wa_write(wal, GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE);
}

static void
hsw_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
        /* L3 caching of data atomics doesn't work -- disable it. */
        wa_write(wal, HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);

        wa_add(wal,
               HSW_ROW_CHICKEN3, 0,
               _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE),
               0 /* XXX does this reg exist? */, true);

        /* WaVSRefCountFullforceMissDisable:hsw */
        wa_write_clr(wal, GEN7_FF_THREAD_MODE, GEN7_FF_VS_REF_CNT_FFME);
}

static void
gen9_wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
        const struct sseu_dev_info *sseu = &to_gt(i915)->info.sseu;
        unsigned int slice, subslice;
        u32 mcr, mcr_mask;

        GEM_BUG_ON(GRAPHICS_VER(i915) != 9);

        /*
         * WaProgramMgsrForCorrectSliceSpecificMmioReads:gen9,glk,kbl,cml
         * Before any MMIO read into slice/subslice specific registers, the MCR
         * packet control register needs to be programmed to point to any
         * enabled s/ss pair. Otherwise, incorrect values will be returned.
         * This means each subsequent MMIO read will be forwarded to a
         * specific s/ss combination, but this is OK since these registers
         * are consistent across s/ss in almost all cases. On the rare
         * occasions, such as INSTDONE, where this value is dependent
         * on the s/ss combo, the read should be done with read_subslice_reg.
         */
        slice = ffs(sseu->slice_mask) - 1;
        GEM_BUG_ON(slice >= ARRAY_SIZE(sseu->subslice_mask));
        subslice = ffs(intel_sseu_get_subslices(sseu, slice));
        GEM_BUG_ON(!subslice);
        subslice--;

        /*
         * We use the GEN8_MCR..() macros to calculate the |mcr| value for
         * Gen9 to address WaProgramMgsrForCorrectSliceSpecificMmioReads.
         */
        mcr = GEN8_MCR_SLICE(slice) | GEN8_MCR_SUBSLICE(subslice);
        mcr_mask = GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK;

        drm_dbg(&i915->drm, "MCR slice:%d/subslice:%d = %x\n", slice, subslice, mcr);

        wa_write_clr_set(wal, GEN8_MCR_SELECTOR, mcr_mask, mcr);
}
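
/*
 * A worked example with assumed fuse masks: slice_mask == 0x1 and a subslice
 * mask of 0x6 for slice 0 give slice == 0 and subslice == 1 (the lowest
 * enabled instance of each), so the w/a entry programs GEN8_MCR_SELECTOR to
 * steer slice/subslice-specific reads at s0/ss1 by default.
 */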

static void
gen9_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
        struct drm_i915_private *i915 = gt->i915;

        /* WaProgramMgsrForCorrectSliceSpecificMmioReads:glk,kbl,cml,gen9 */
        gen9_wa_init_mcr(i915, wal);

        /* WaDisableKillLogic:bxt,skl,kbl */
        if (!IS_COFFEELAKE(i915) && !IS_COMETLAKE(i915))
                wa_write_or(wal,
                            GAM_ECOCHK,
                            ECOCHK_DIS_TLB);

        if (HAS_LLC(i915)) {
                /* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl
                 *
                 * Must match Display Engine. See
                 * WaCompressedResourceDisplayNewHashMode.
                 */
                wa_write_or(wal,
                            MMCD_MISC_CTRL,
                            MMCD_PCLA | MMCD_HOTSPOT_EN);
        }

        /* WaDisableHDCInvalidation:skl,bxt,kbl,cfl */
        wa_write_or(wal,
                    GAM_ECOCHK,
                    BDW_DISABLE_HDC_INVALIDATION);
}

static void
skl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
        gen9_gt_workarounds_init(gt, wal);

        /* WaDisableGafsUnitClkGating:skl */
        wa_write_or(wal,
                    GEN7_UCGCTL4,
                    GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);

        /* WaInPlaceDecompressionHang:skl */
        if (IS_SKL_GRAPHICS_STEP(gt->i915, STEP_A0, STEP_H0))
                wa_write_or(wal,
                            GEN9_GAMT_ECO_REG_RW_IA,
                            GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}

static void
kbl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
        gen9_gt_workarounds_init(gt, wal);

        /* WaDisableDynamicCreditSharing:kbl */
        if (IS_KBL_GRAPHICS_STEP(gt->i915, 0, STEP_C0))
                wa_write_or(wal,
                            GAMT_CHKN_BIT_REG,
                            GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);

        /* WaDisableGafsUnitClkGating:kbl */
        wa_write_or(wal,
                    GEN7_UCGCTL4,
                    GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);

        /* WaInPlaceDecompressionHang:kbl */
        wa_write_or(wal,
                    GEN9_GAMT_ECO_REG_RW_IA,
                    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}

static void
glk_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
        gen9_gt_workarounds_init(gt, wal);
}

static void
cfl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
        gen9_gt_workarounds_init(gt, wal);

        /* WaDisableGafsUnitClkGating:cfl */
        wa_write_or(wal,
                    GEN7_UCGCTL4,
                    GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);

        /* WaInPlaceDecompressionHang:cfl */
        wa_write_or(wal,
                    GEN9_GAMT_ECO_REG_RW_IA,
                    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}

static void __set_mcr_steering(struct i915_wa_list *wal,
                               i915_reg_t steering_reg,
                               unsigned int slice, unsigned int subslice)
{
        u32 mcr, mcr_mask;

        mcr = GEN11_MCR_SLICE(slice) | GEN11_MCR_SUBSLICE(subslice);
        mcr_mask = GEN11_MCR_SLICE_MASK | GEN11_MCR_SUBSLICE_MASK;

        wa_write_clr_set(wal, steering_reg, mcr_mask, mcr);
}

static void __add_mcr_wa(struct intel_gt *gt, struct i915_wa_list *wal,
                         unsigned int slice, unsigned int subslice)
{
        struct drm_printer p = drm_debug_printer("MCR Steering:");

        __set_mcr_steering(wal, GEN8_MCR_SELECTOR, slice, subslice);

        gt->default_steering.groupid = slice;
        gt->default_steering.instanceid = subslice;

        if (drm_debug_enabled(DRM_UT_DRIVER))
                intel_gt_report_steering(&p, gt, false);
}

static void
icl_wa_init_mcr(struct intel_gt *gt, struct i915_wa_list *wal)
{
        const struct sseu_dev_info *sseu = &gt->info.sseu;
        unsigned int slice, subslice;

        GEM_BUG_ON(GRAPHICS_VER(gt->i915) < 11);
        GEM_BUG_ON(hweight8(sseu->slice_mask) > 1);
        slice = 0;

        /*
         * Although a platform may have subslices, we need to always steer
         * reads to the lowest instance that isn't fused off. When Render
         * Power Gating is enabled, grabbing forcewake will only power up a
         * single subslice (the "minconfig") if there isn't a real workload
         * that needs to be run; this means that if we steer register reads to
         * one of the higher subslices, we run the risk of reading back 0's or
         * random garbage.
         */
        subslice = __ffs(intel_sseu_get_subslices(sseu, slice));

        /*
         * If the subslice we picked above also steers us to a valid L3 bank,
         * then we can just rely on the default steering and won't need to
         * worry about explicitly re-steering L3BANK reads later.
         */
        if (gt->info.l3bank_mask & BIT(subslice))
                gt->steering_table[L3BANK] = NULL;

        __add_mcr_wa(gt, wal, slice, subslice);
}

static void
xehp_init_mcr(struct intel_gt *gt, struct i915_wa_list *wal)
{
        const struct sseu_dev_info *sseu = &gt->info.sseu;
        unsigned long slice, subslice = 0, slice_mask = 0;
        u64 dss_mask = 0;
        u32 lncf_mask = 0;
        int i;

        /*
         * On Xe_HP the steering increases in complexity. There are now several
         * more units that require steering and we're not guaranteed to be able
         * to find a common setting for all of them. These are:
         * - GSLICE (fusable)
         * - DSS (sub-unit within gslice; fusable)
         * - L3 Bank (fusable)
         * - MSLICE (fusable)
         * - LNCF (sub-unit within mslice; always present if mslice is present)
         *
         * We'll do our default/implicit steering based on GSLICE (in the
         * sliceid field) and DSS (in the subsliceid field). If we can
         * find overlap between the valid MSLICE and/or LNCF values with
         * a suitable GSLICE, then we can just re-use the default value and
         * skip any explicit steering at runtime.
         *
         * We only need to look for overlap between GSLICE/MSLICE/LNCF to find
         * a valid sliceid value. DSS steering is the only type of steering
         * that utilizes the 'subsliceid' bits.
         *
         * Also note that, even though the steering domain is called "GSlice"
         * and it is encoded in the register using the gslice format, the spec
         * says that the combined (geometry | compute) fuse should be used to
         * select the steering.
         */

        /* Find the potential gslice candidates */
        dss_mask = intel_sseu_get_subslices(sseu, 0);
        slice_mask = intel_slicemask_from_dssmask(dss_mask, GEN_DSS_PER_GSLICE);

        /*
         * Find the potential LNCF candidates. Either LNCF within a valid
         * mslice is fine.
         */
        for_each_set_bit(i, &gt->info.mslice_mask, GEN12_MAX_MSLICES)
                lncf_mask |= (0x3 << (i * 2));

        /*
         * Are there any sliceid values that work for both GSLICE and LNCF
         * steering?
         */
        if (slice_mask & lncf_mask) {
                slice_mask &= lncf_mask;
                gt->steering_table[LNCF] = NULL;
        }

        /* How about sliceid values that also work for MSLICE steering? */
        if (slice_mask & gt->info.mslice_mask) {
                slice_mask &= gt->info.mslice_mask;
                gt->steering_table[MSLICE] = NULL;
        }

        slice = __ffs(slice_mask);
        subslice = __ffs(dss_mask >> (slice * GEN_DSS_PER_GSLICE));
        WARN_ON(subslice > GEN_DSS_PER_GSLICE);
        WARN_ON(dss_mask >> (slice * GEN_DSS_PER_GSLICE) == 0);

        __add_mcr_wa(gt, wal, slice, subslice);

        /*
         * SQIDI ranges are special because they use different steering
         * registers than everything else we work with. On XeHP SDV and
         * DG2-G10, any value in the steering registers will work fine since
         * all instances are present, but DG2-G11 only has SQIDI instances at
         * IDs 2 and 3, so we need to steer to one of those. For simplicity
         * we'll just steer to a hardcoded "2" since that value will work
         * everywhere.
         */
        __set_mcr_steering(wal, MCFG_MCR_SELECTOR, 0, 2);
        __set_mcr_steering(wal, SF_MCR_SELECTOR, 0, 2);
}
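
/*
 * A worked example under assumed fuse values: with dss_mask == 0xf0 and
 * GEN_DSS_PER_GSLICE == 4, only DSS 4-7 are present, so slice_mask == 0x2
 * (gslice 1). If mslice_mask == 0x3, then lncf_mask == 0xf; gslice 1
 * satisfies both (0x2 & 0xf and 0x2 & 0x3 are non-zero), so the LNCF and
 * MSLICE steering tables can be dropped and the default steering becomes
 * slice == 1, subslice == __ffs(0xf0 >> 4) == 0.
 */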

static void
icl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
        struct drm_i915_private *i915 = gt->i915;

        icl_wa_init_mcr(gt, wal);

        /* WaModifyGamTlbPartitioning:icl */
        wa_write_clr_set(wal,
                         GEN11_GACB_PERF_CTRL,
                         GEN11_HASH_CTRL_MASK,
                         GEN11_HASH_CTRL_BIT0 | GEN11_HASH_CTRL_BIT4);

        /* Wa_1405766107:icl
         * Formerly known as WaCL2SFHalfMaxAlloc
         */
        wa_write_or(wal,
                    GEN11_LSN_UNSLCVC,
                    GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC |
                    GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC);

        /* Wa_220166154:icl
         * Formerly known as WaDisCtxReload
         */
        wa_write_or(wal,
                    GEN8_GAMW_ECO_DEV_RW_IA,
                    GAMW_ECO_DEV_CTX_RELOAD_DISABLE);

        /* Wa_1406463099:icl
         * Formerly known as WaGamTlbPendError
         */
        wa_write_or(wal,
                    GAMT_CHKN_BIT_REG,
                    GAMT_CHKN_DISABLE_L3_COH_PIPE);

        /* Wa_1407352427:icl,ehl */
        wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
                    PSDUNIT_CLKGATE_DIS);

        /* Wa_1406680159:icl,ehl */
        wa_write_or(wal,
                    SUBSLICE_UNIT_LEVEL_CLKGATE,
                    GWUNIT_CLKGATE_DIS);

        /* Wa_1607087056:icl,ehl,jsl */
        if (IS_ICELAKE(i915) ||
            IS_JSL_EHL_GRAPHICS_STEP(i915, STEP_A0, STEP_B0))
                wa_write_or(wal,
                            SLICE_UNIT_LEVEL_CLKGATE,
                            L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS);

        /*
         * This is not a documented workaround, but rather an optimization
         * to reduce sampler power.
         */
        wa_write_clr(wal, GEN10_DFR_RATIO_EN_AND_CHICKEN, DFR_DISABLE);
}

/*
 * Though there are per-engine instances of these registers,
 * they retain their value through engine resets and should
 * only be provided on the GT workaround list rather than
 * the engine-specific workaround list.
 */
static void
wa_14011060649(struct intel_gt *gt, struct i915_wa_list *wal)
{
        struct intel_engine_cs *engine;
        int id;

        for_each_engine(engine, gt, id) {
                if (engine->class != VIDEO_DECODE_CLASS ||
                    (engine->instance % 2))
                        continue;

                wa_write_or(wal, VDBOX_CGCTL3F10(engine->mmio_base),
                            IECPUNIT_CLKGATE_DIS);
        }
}

static void
gen12_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
        icl_wa_init_mcr(gt, wal);

        /* Wa_14011060649:tgl,rkl,dg1,adl-s,adl-p */
        wa_14011060649(gt, wal);

        /* Wa_14011059788:tgl,rkl,adl-s,dg1,adl-p */
        wa_write_or(wal, GEN10_DFR_RATIO_EN_AND_CHICKEN, DFR_DISABLE);
}

static void
tgl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
        struct drm_i915_private *i915 = gt->i915;

        gen12_gt_workarounds_init(gt, wal);

        /* Wa_1409420604:tgl */
        if (IS_TGL_UY_GRAPHICS_STEP(i915, STEP_A0, STEP_B0))
                wa_write_or(wal,
                            SUBSLICE_UNIT_LEVEL_CLKGATE2,
                            CPSSUNIT_CLKGATE_DIS);

        /* Wa_1607087056:tgl, also known as BUG:1409180338 */
        if (IS_TGL_UY_GRAPHICS_STEP(i915, STEP_A0, STEP_B0))
                wa_write_or(wal,
                            SLICE_UNIT_LEVEL_CLKGATE,
                            L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS);

        /* Wa_1408615072:tgl[a0] */
        if (IS_TGL_UY_GRAPHICS_STEP(i915, STEP_A0, STEP_B0))
                wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
                            VSUNIT_CLKGATE_DIS_TGL);
}

static void
dg1_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
        struct drm_i915_private *i915 = gt->i915;

        gen12_gt_workarounds_init(gt, wal);

        /* Wa_1607087056:dg1 */
        if (IS_DG1_GRAPHICS_STEP(i915, STEP_A0, STEP_B0))
                wa_write_or(wal,
                            SLICE_UNIT_LEVEL_CLKGATE,
                            L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS);

        /* Wa_1409420604:dg1 */
        if (IS_DG1(i915))
                wa_write_or(wal,
                            SUBSLICE_UNIT_LEVEL_CLKGATE2,
                            CPSSUNIT_CLKGATE_DIS);

        /* Wa_1408615072:dg1 */
        /* Empirical testing shows this register is unaffected by engine reset. */
        if (IS_DG1(i915))
                wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
                            VSUNIT_CLKGATE_DIS_TGL);
}

static void
xehpsdv_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
        struct drm_i915_private *i915 = gt->i915;

        xehp_init_mcr(gt, wal);

        /* Wa_1409757795:xehpsdv */
        wa_write_or(wal, SCCGCTL94DC, CG3DDISURB);

        /* Wa_16011155590:xehpsdv */
        if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_A0, STEP_B0))
                wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE,
                            TSGUNIT_CLKGATE_DIS);

        /* Wa_14011780169:xehpsdv */
        if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_B0, STEP_FOREVER)) {
                wa_write_or(wal, UNSLCGCTL9440, GAMTLBOACS_CLKGATE_DIS |
                            GAMTLBVDBOX7_CLKGATE_DIS |
                            GAMTLBVDBOX6_CLKGATE_DIS |
                            GAMTLBVDBOX5_CLKGATE_DIS |
                            GAMTLBVDBOX4_CLKGATE_DIS |
                            GAMTLBVDBOX3_CLKGATE_DIS |
                            GAMTLBVDBOX2_CLKGATE_DIS |
                            GAMTLBVDBOX1_CLKGATE_DIS |
                            GAMTLBVDBOX0_CLKGATE_DIS |
                            GAMTLBKCR_CLKGATE_DIS |
                            GAMTLBGUC_CLKGATE_DIS |
                            GAMTLBBLT_CLKGATE_DIS);
                wa_write_or(wal, UNSLCGCTL9444, GAMTLBGFXA0_CLKGATE_DIS |
                            GAMTLBGFXA1_CLKGATE_DIS |
                            GAMTLBCOMPA0_CLKGATE_DIS |
                            GAMTLBCOMPA1_CLKGATE_DIS |
                            GAMTLBCOMPB0_CLKGATE_DIS |
                            GAMTLBCOMPB1_CLKGATE_DIS |
                            GAMTLBCOMPC0_CLKGATE_DIS |
                            GAMTLBCOMPC1_CLKGATE_DIS |
                            GAMTLBCOMPD0_CLKGATE_DIS |
                            GAMTLBCOMPD1_CLKGATE_DIS |
                            GAMTLBMERT_CLKGATE_DIS |
                            GAMTLBVEBOX3_CLKGATE_DIS |
                            GAMTLBVEBOX2_CLKGATE_DIS |
                            GAMTLBVEBOX1_CLKGATE_DIS |
                            GAMTLBVEBOX0_CLKGATE_DIS);
        }

        /* Wa_16012725990:xehpsdv */
        if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_A1, STEP_FOREVER))
                wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE, VFUNIT_CLKGATE_DIS);

        /* Wa_14011060649:xehpsdv */
        wa_14011060649(gt, wal);
}

static void
dg2_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
        struct intel_engine_cs *engine;
        int id;

        xehp_init_mcr(gt, wal);

        /* Wa_14011060649:dg2 */
        wa_14011060649(gt, wal);

        /*
         * Although there are per-engine instances of these registers,
         * they technically exist outside the engine itself and are not
         * impacted by engine resets. Furthermore, they're part of the
         * GuC blacklist so trying to treat them as engine workarounds
         * will result in GuC initialization failure and a wedged GPU.
         */
        for_each_engine(engine, gt, id) {
                if (engine->class != VIDEO_DECODE_CLASS)
                        continue;

                /* Wa_16010515920:dg2_g10 */
                if (IS_DG2_GRAPHICS_STEP(gt->i915, G10, STEP_A0, STEP_B0))
                        wa_write_or(wal, VDBOX_CGCTL3F18(engine->mmio_base),
                                    ALNUNIT_CLKGATE_DIS);
        }

        if (IS_DG2_G10(gt->i915)) {
                /* Wa_22010523718:dg2 */
                wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE,
                            CG3DDISCFEG_CLKGATE_DIS);

                /* Wa_14011006942:dg2 */
                wa_write_or(wal, SUBSLICE_UNIT_LEVEL_CLKGATE,
                            DSS_ROUTER_CLKGATE_DIS);
        }

        if (IS_DG2_GRAPHICS_STEP(gt->i915, G10, STEP_A0, STEP_B0)) {
                /* Wa_14010948348:dg2_g10 */
                wa_write_or(wal, UNSLCGCTL9430, MSQDUNIT_CLKGATE_DIS);

                /* Wa_14011037102:dg2_g10 */
                wa_write_or(wal, UNSLCGCTL9444, LTCDD_CLKGATE_DIS);

                /* Wa_14011371254:dg2_g10 */
                wa_write_or(wal, SLICE_UNIT_LEVEL_CLKGATE, NODEDSS_CLKGATE_DIS);

                /* Wa_14011431319:dg2_g10 */
                wa_write_or(wal, UNSLCGCTL9440, GAMTLBOACS_CLKGATE_DIS |
                            GAMTLBVDBOX7_CLKGATE_DIS |
                            GAMTLBVDBOX6_CLKGATE_DIS |
                            GAMTLBVDBOX5_CLKGATE_DIS |
                            GAMTLBVDBOX4_CLKGATE_DIS |
                            GAMTLBVDBOX3_CLKGATE_DIS |
                            GAMTLBVDBOX2_CLKGATE_DIS |
                            GAMTLBVDBOX1_CLKGATE_DIS |
                            GAMTLBVDBOX0_CLKGATE_DIS |
                            GAMTLBKCR_CLKGATE_DIS |
                            GAMTLBGUC_CLKGATE_DIS |
                            GAMTLBBLT_CLKGATE_DIS);
                wa_write_or(wal, UNSLCGCTL9444, GAMTLBGFXA0_CLKGATE_DIS |
                            GAMTLBGFXA1_CLKGATE_DIS |
                            GAMTLBCOMPA0_CLKGATE_DIS |
                            GAMTLBCOMPA1_CLKGATE_DIS |
                            GAMTLBCOMPB0_CLKGATE_DIS |
                            GAMTLBCOMPB1_CLKGATE_DIS |
                            GAMTLBCOMPC0_CLKGATE_DIS |
                            GAMTLBCOMPC1_CLKGATE_DIS |
                            GAMTLBCOMPD0_CLKGATE_DIS |
                            GAMTLBCOMPD1_CLKGATE_DIS |
                            GAMTLBMERT_CLKGATE_DIS |
                            GAMTLBVEBOX3_CLKGATE_DIS |
                            GAMTLBVEBOX2_CLKGATE_DIS |
                            GAMTLBVEBOX1_CLKGATE_DIS |
                            GAMTLBVEBOX0_CLKGATE_DIS);

                /* Wa_14010569222:dg2_g10 */
                wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE,
                            GAMEDIA_CLKGATE_DIS);

                /* Wa_14011028019:dg2_g10 */
                wa_write_or(wal, SSMCGCTL9530, RTFUNIT_CLKGATE_DIS);
        }

        /* Wa_14014830051:dg2 */
        wa_write_clr(wal, SARB_CHICKEN1, COMP_CKN_IN);

        /*
         * The following are not actually "workarounds" but rather
         * recommended tuning settings documented in the bspec's
         * performance guide section.
         */
        wa_write_or(wal, GEN12_SQCM, EN_32B_ACCESS);
}

static void
gt_init_workarounds(struct intel_gt *gt, struct i915_wa_list *wal)
{
        struct drm_i915_private *i915 = gt->i915;

        if (IS_DG2(i915))
                dg2_gt_workarounds_init(gt, wal);
        else if (IS_XEHPSDV(i915))
                xehpsdv_gt_workarounds_init(gt, wal);
        else if (IS_DG1(i915))
                dg1_gt_workarounds_init(gt, wal);
        else if (IS_TIGERLAKE(i915))
                tgl_gt_workarounds_init(gt, wal);
        else if (GRAPHICS_VER(i915) == 12)
                gen12_gt_workarounds_init(gt, wal);
        else if (GRAPHICS_VER(i915) == 11)
                icl_gt_workarounds_init(gt, wal);
        else if (IS_COFFEELAKE(i915) || IS_COMETLAKE(i915))
                cfl_gt_workarounds_init(gt, wal);
        else if (IS_GEMINILAKE(i915))
                glk_gt_workarounds_init(gt, wal);
        else if (IS_KABYLAKE(i915))
                kbl_gt_workarounds_init(gt, wal);
        else if (IS_BROXTON(i915))
                gen9_gt_workarounds_init(gt, wal);
        else if (IS_SKYLAKE(i915))
                skl_gt_workarounds_init(gt, wal);
        else if (IS_HASWELL(i915))
                hsw_gt_workarounds_init(gt, wal);
        else if (IS_VALLEYVIEW(i915))
                vlv_gt_workarounds_init(gt, wal);
        else if (IS_IVYBRIDGE(i915))
                ivb_gt_workarounds_init(gt, wal);
        else if (GRAPHICS_VER(i915) == 6)
                snb_gt_workarounds_init(gt, wal);
        else if (GRAPHICS_VER(i915) == 5)
                ilk_gt_workarounds_init(gt, wal);
        else if (IS_G4X(i915))
                g4x_gt_workarounds_init(gt, wal);
        else if (GRAPHICS_VER(i915) == 4)
                gen4_gt_workarounds_init(gt, wal);
        else if (GRAPHICS_VER(i915) <= 8)
                ;
        else
                MISSING_CASE(GRAPHICS_VER(i915));
}

void intel_gt_init_workarounds(struct intel_gt *gt)
{
        struct i915_wa_list *wal = &gt->wa_list;

        wa_init_start(wal, "GT", "global");
        gt_init_workarounds(gt, wal);
        wa_init_finish(wal);
}

static enum forcewake_domains
wal_get_fw_for_rmw(struct intel_uncore *uncore, const struct i915_wa_list *wal)
{
        enum forcewake_domains fw = 0;
        struct i915_wa *wa;
        unsigned int i;

        for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
                fw |= intel_uncore_forcewake_for_reg(uncore,
                                                     wa->reg,
                                                     FW_REG_READ |
                                                     FW_REG_WRITE);

        return fw;
}

static bool
wa_verify(const struct i915_wa *wa, u32 cur, const char *name, const char *from)
{
        if ((cur ^ wa->set) & wa->read) {
                DRM_ERROR("%s workaround lost on %s! (reg[%x]=0x%x, relevant bits were 0x%x vs expected 0x%x)\n",
                          name, from, i915_mmio_reg_offset(wa->reg),
                          cur, cur & wa->read, wa->set & wa->read);

                return false;
        }

        return true;
}

static void
wa_list_apply(struct intel_gt *gt, const struct i915_wa_list *wal)
{
        struct intel_uncore *uncore = gt->uncore;
        enum forcewake_domains fw;
        unsigned long flags;
        struct i915_wa *wa;
        unsigned int i;

        if (!wal->count)
                return;

        fw = wal_get_fw_for_rmw(uncore, wal);

        spin_lock_irqsave(&uncore->lock, flags);
        intel_uncore_forcewake_get__locked(uncore, fw);

        for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
                u32 val, old = 0;

                /* open-coded rmw due to steering */
                old = wa->clr ? intel_gt_read_register_fw(gt, wa->reg) : 0;
                val = (old & ~wa->clr) | wa->set;
                if (val != old || !wa->clr)
                        intel_uncore_write_fw(uncore, wa->reg, val);

                if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
                        wa_verify(wa, intel_gt_read_register_fw(gt, wa->reg),
                                  wal->name, "application");
        }

        intel_uncore_forcewake_put__locked(uncore, fw);
        spin_unlock_irqrestore(&uncore->lock, flags);
}

void intel_gt_apply_workarounds(struct intel_gt *gt)
{
        wa_list_apply(gt, &gt->wa_list);
}
1617
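/*
 * Re-read every register in the list from the CPU and check that all
 * the workaround bits are still in place; returns true only if every
 * entry verifies successfully.
 */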
static bool wa_list_verify(struct intel_gt *gt,
			   const struct i915_wa_list *wal,
			   const char *from)
{
	struct intel_uncore *uncore = gt->uncore;
	struct i915_wa *wa;
	enum forcewake_domains fw;
	unsigned long flags;
	unsigned int i;
	bool ok = true;

	fw = wal_get_fw_for_rmw(uncore, wal);

	spin_lock_irqsave(&uncore->lock, flags);
	intel_uncore_forcewake_get__locked(uncore, fw);

	for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
		ok &= wa_verify(wa,
				intel_gt_read_register_fw(gt, wa->reg),
				wal->name, from);

	intel_uncore_forcewake_put__locked(uncore, fw);
	spin_unlock_irqrestore(&uncore->lock, flags);

	return ok;
}

bool intel_gt_verify_workarounds(struct intel_gt *gt, const char *from)
{
	return wa_list_verify(gt, &gt->wa_list, from);
}

__maybe_unused
static bool is_nonpriv_flags_valid(u32 flags)
{
	/* Check only valid flag bits are set */
	if (flags & ~RING_FORCE_TO_NONPRIV_MASK_VALID)
		return false;

	/* NB: Only 3 out of 4 enum values are valid for access field */
	if ((flags & RING_FORCE_TO_NONPRIV_ACCESS_MASK) ==
	    RING_FORCE_TO_NONPRIV_ACCESS_INVALID)
		return false;

	return true;
}

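/*
 * Whitelist entries are stored as the register offset with the access
 * and range flags OR'ed directly into the i915_reg_t value;
 * intel_engine_apply_whitelist() later writes that combined value
 * verbatim into a RING_FORCE_TO_NONPRIV slot.
 */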
static void
whitelist_reg_ext(struct i915_wa_list *wal, i915_reg_t reg, u32 flags)
{
	struct i915_wa wa = {
		.reg = reg
	};

	if (GEM_DEBUG_WARN_ON(wal->count >= RING_MAX_NONPRIV_SLOTS))
		return;

	if (GEM_DEBUG_WARN_ON(!is_nonpriv_flags_valid(flags)))
		return;

	wa.reg.reg |= flags;
	_wa_add(wal, &wa);
}

static void
whitelist_reg(struct i915_wa_list *wal, i915_reg_t reg)
{
	whitelist_reg_ext(wal, reg, RING_FORCE_TO_NONPRIV_ACCESS_RW);
}

static void gen9_whitelist_build(struct i915_wa_list *w)
{
	/* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk,cfl */
	whitelist_reg(w, GEN9_CTX_PREEMPT_REG);

	/* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,[cnl] */
	whitelist_reg(w, GEN8_CS_CHICKEN1);

	/* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl,glk,cfl */
	whitelist_reg(w, GEN8_HDC_CHICKEN1);

	/* WaSendPushConstantsFromMMIO:skl,bxt */
	whitelist_reg(w, COMMON_SLICE_CHICKEN2);
}

static void skl_whitelist_build(struct intel_engine_cs *engine)
{
	struct i915_wa_list *w = &engine->whitelist;

	if (engine->class != RENDER_CLASS)
		return;

	gen9_whitelist_build(w);

	/* WaDisableLSQCROPERFforOCL:skl */
	whitelist_reg(w, GEN8_L3SQCREG4);
}

static void bxt_whitelist_build(struct intel_engine_cs *engine)
{
	if (engine->class != RENDER_CLASS)
		return;

	gen9_whitelist_build(&engine->whitelist);
}

static void kbl_whitelist_build(struct intel_engine_cs *engine)
{
	struct i915_wa_list *w = &engine->whitelist;

	if (engine->class != RENDER_CLASS)
		return;

	gen9_whitelist_build(w);

	/* WaDisableLSQCROPERFforOCL:kbl */
	whitelist_reg(w, GEN8_L3SQCREG4);
}

static void glk_whitelist_build(struct intel_engine_cs *engine)
{
	struct i915_wa_list *w = &engine->whitelist;

	if (engine->class != RENDER_CLASS)
		return;

	gen9_whitelist_build(w);

	/* WA #0862: Userspace has to set "Barrier Mode" to avoid hangs. */
	whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1);
}

static void cfl_whitelist_build(struct intel_engine_cs *engine)
{
	struct i915_wa_list *w = &engine->whitelist;

	if (engine->class != RENDER_CLASS)
		return;

	gen9_whitelist_build(w);

	/*
	 * WaAllowPMDepthAndInvocationCountAccessFromUMD:cfl,whl,cml,aml
	 *
	 * This covers 4 registers which are next to one another:
	 * - PS_INVOCATION_COUNT
	 * - PS_INVOCATION_COUNT_UDW
	 * - PS_DEPTH_COUNT
	 * - PS_DEPTH_COUNT_UDW
	 */
	whitelist_reg_ext(w, PS_INVOCATION_COUNT,
			  RING_FORCE_TO_NONPRIV_ACCESS_RD |
			  RING_FORCE_TO_NONPRIV_RANGE_4);
}

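/*
 * Give userspace read access to the per-engine CTX_TIMESTAMP register
 * on the non-render engines.
 */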
static void allow_read_ctx_timestamp(struct intel_engine_cs *engine)
{
	struct i915_wa_list *w = &engine->whitelist;

	if (engine->class != RENDER_CLASS)
		whitelist_reg_ext(w,
				  RING_CTX_TIMESTAMP(engine->mmio_base),
				  RING_FORCE_TO_NONPRIV_ACCESS_RD);
}

static void cml_whitelist_build(struct intel_engine_cs *engine)
{
	allow_read_ctx_timestamp(engine);

	cfl_whitelist_build(engine);
}

static void icl_whitelist_build(struct intel_engine_cs *engine)
{
	struct i915_wa_list *w = &engine->whitelist;

	allow_read_ctx_timestamp(engine);

	switch (engine->class) {
	case RENDER_CLASS:
		/* WaAllowUMDToModifyHalfSliceChicken7:icl */
		whitelist_reg(w, GEN9_HALF_SLICE_CHICKEN7);

		/* WaAllowUMDToModifySamplerMode:icl */
		whitelist_reg(w, GEN10_SAMPLER_MODE);

		/* WaEnableStateCacheRedirectToCS:icl */
		whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1);

		/*
		 * WaAllowPMDepthAndInvocationCountAccessFromUMD:icl
		 *
		 * This covers 4 registers which are next to one another:
		 * - PS_INVOCATION_COUNT
		 * - PS_INVOCATION_COUNT_UDW
		 * - PS_DEPTH_COUNT
		 * - PS_DEPTH_COUNT_UDW
		 */
		whitelist_reg_ext(w, PS_INVOCATION_COUNT,
				  RING_FORCE_TO_NONPRIV_ACCESS_RD |
				  RING_FORCE_TO_NONPRIV_RANGE_4);
		break;

	case VIDEO_DECODE_CLASS:
		/* hucStatusRegOffset */
		whitelist_reg_ext(w, _MMIO(0x2000 + engine->mmio_base),
				  RING_FORCE_TO_NONPRIV_ACCESS_RD);
		/* hucUKernelHdrInfoRegOffset */
		whitelist_reg_ext(w, _MMIO(0x2014 + engine->mmio_base),
				  RING_FORCE_TO_NONPRIV_ACCESS_RD);
		/* hucStatus2RegOffset */
		whitelist_reg_ext(w, _MMIO(0x23B0 + engine->mmio_base),
				  RING_FORCE_TO_NONPRIV_ACCESS_RD);
		break;

	default:
		break;
	}
}

static void tgl_whitelist_build(struct intel_engine_cs *engine)
{
	struct i915_wa_list *w = &engine->whitelist;

	allow_read_ctx_timestamp(engine);

	switch (engine->class) {
	case RENDER_CLASS:
		/*
		 * WaAllowPMDepthAndInvocationCountAccessFromUMD:tgl
		 * Wa_1408556865:tgl
		 *
		 * This covers 4 registers which are next to one another:
		 * - PS_INVOCATION_COUNT
		 * - PS_INVOCATION_COUNT_UDW
		 * - PS_DEPTH_COUNT
		 * - PS_DEPTH_COUNT_UDW
		 */
		whitelist_reg_ext(w, PS_INVOCATION_COUNT,
				  RING_FORCE_TO_NONPRIV_ACCESS_RD |
				  RING_FORCE_TO_NONPRIV_RANGE_4);

		/*
		 * Wa_1808121037:tgl
		 * Wa_14012131227:dg1
		 * Wa_1508744258:tgl,rkl,dg1,adl-s,adl-p
		 */
		whitelist_reg(w, GEN7_COMMON_SLICE_CHICKEN1);

		/* Wa_1806527549:tgl */
		whitelist_reg(w, HIZ_CHICKEN);
		break;
	default:
		break;
	}
}

static void dg1_whitelist_build(struct intel_engine_cs *engine)
{
	struct i915_wa_list *w = &engine->whitelist;

	tgl_whitelist_build(engine);

	/* GEN:BUG:1409280441:dg1 */
	if (IS_DG1_GRAPHICS_STEP(engine->i915, STEP_A0, STEP_B0) &&
	    (engine->class == RENDER_CLASS ||
	     engine->class == COPY_ENGINE_CLASS))
		whitelist_reg_ext(w, RING_ID(engine->mmio_base),
				  RING_FORCE_TO_NONPRIV_ACCESS_RD);
}

static void xehpsdv_whitelist_build(struct intel_engine_cs *engine)
{
	allow_read_ctx_timestamp(engine);
}

static void dg2_whitelist_build(struct intel_engine_cs *engine)
{
	struct i915_wa_list *w = &engine->whitelist;

	allow_read_ctx_timestamp(engine);

	switch (engine->class) {
	case RENDER_CLASS:
		/*
		 * Wa_1507100340:dg2_g10
		 *
		 * This covers 4 registers which are next to one another:
		 * - PS_INVOCATION_COUNT
		 * - PS_INVOCATION_COUNT_UDW
		 * - PS_DEPTH_COUNT
		 * - PS_DEPTH_COUNT_UDW
		 */
		if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_A0, STEP_B0))
			whitelist_reg_ext(w, PS_INVOCATION_COUNT,
					  RING_FORCE_TO_NONPRIV_ACCESS_RD |
					  RING_FORCE_TO_NONPRIV_RANGE_4);

		break;
	case COMPUTE_CLASS:
		/* Wa_16011157294:dg2_g10 */
		if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_A0, STEP_B0))
			whitelist_reg(w, GEN9_CTX_PREEMPT_REG);
		break;
	default:
		break;
	}
}

void intel_engine_init_whitelist(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;
	struct i915_wa_list *w = &engine->whitelist;

	wa_init_start(w, "whitelist", engine->name);

	if (IS_DG2(i915))
		dg2_whitelist_build(engine);
	else if (IS_XEHPSDV(i915))
		xehpsdv_whitelist_build(engine);
	else if (IS_DG1(i915))
		dg1_whitelist_build(engine);
	else if (GRAPHICS_VER(i915) == 12)
		tgl_whitelist_build(engine);
	else if (GRAPHICS_VER(i915) == 11)
		icl_whitelist_build(engine);
	else if (IS_COMETLAKE(i915))
		cml_whitelist_build(engine);
	else if (IS_COFFEELAKE(i915))
		cfl_whitelist_build(engine);
	else if (IS_GEMINILAKE(i915))
		glk_whitelist_build(engine);
	else if (IS_KABYLAKE(i915))
		kbl_whitelist_build(engine);
	else if (IS_BROXTON(i915))
		bxt_whitelist_build(engine);
	else if (IS_SKYLAKE(i915))
		skl_whitelist_build(engine);
	else if (GRAPHICS_VER(i915) <= 8)
		;
	else
		MISSING_CASE(GRAPHICS_VER(i915));

	wa_init_finish(w);
}

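/*
 * Program each whitelist entry into consecutive RING_FORCE_TO_NONPRIV
 * slots, then park the remaining slots on the harmless RING_NOPID
 * register so that no stale entries are left behind.
 */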
void intel_engine_apply_whitelist(struct intel_engine_cs *engine)
{
	const struct i915_wa_list *wal = &engine->whitelist;
	struct intel_uncore *uncore = engine->uncore;
	const u32 base = engine->mmio_base;
	struct i915_wa *wa;
	unsigned int i;

	if (!wal->count)
		return;

	for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
		intel_uncore_write(uncore,
				   RING_FORCE_TO_NONPRIV(base, i),
				   i915_mmio_reg_offset(wa->reg));

	/* And clear the rest just in case of garbage */
	for (; i < RING_MAX_NONPRIV_SLOTS; i++)
		intel_uncore_write(uncore,
				   RING_FORCE_TO_NONPRIV(base, i),
				   i915_mmio_reg_offset(RING_NOPID(base)));
}

/*
 * engine_fake_wa_init(), a placeholder to program registers that are not
 * part of an official workaround defined by the hardware team. Adding
 * the programming of those registers to the workaround list lets us use
 * the wa framework for proper application and verification.
 */
static void
engine_fake_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
{
	u8 mocs;

	/*
	 * RING_CMD_CCTL needs to be programmed to un-cached for memory
	 * writes and reads output by the Command Streamers on Gen12
	 * onward platforms.
	 */
	if (GRAPHICS_VER(engine->i915) >= 12) {
		mocs = engine->gt->mocs.uc_index;
		wa_masked_field_set(wal,
				    RING_CMD_CCTL(engine->mmio_base),
				    CMD_CCTL_MOCS_MASK,
				    CMD_CCTL_MOCS_OVERRIDE(mocs, mocs));
	}
}

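/*
 * Wa_1308578152 is only needed when the first gslice is fused off, i.e.
 * when none of the DSS belonging to gslice 0 are present in the
 * subslice mask.
 */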
static bool needs_wa_1308578152(struct intel_engine_cs *engine)
{
	u64 dss_mask = intel_sseu_get_subslices(&engine->gt->info.sseu, 0);

	return (dss_mask & GENMASK(GEN_DSS_PER_GSLICE - 1, 0)) == 0;
}

static void
rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = engine->i915;

	if (IS_DG2(i915)) {
		/* Wa_14015227452:dg2 */
		wa_masked_en(wal, GEN9_ROW_CHICKEN4, XEHP_DIS_BBL_SYSPIPE);

		/* Wa_1509235366:dg2 */
		wa_write_or(wal, GEN12_GAMCNTRL_CTRL, INVALIDATION_BROADCAST_MODE_DIS |
			    GLOBAL_INVALIDATION_MODE);

		/*
		 * The following are not actually "workarounds" but rather
		 * recommended tuning settings documented in the bspec's
		 * performance guide section.
		 */
		wa_write_or(wal, XEHP_L3SCQREG7, BLEND_FILL_CACHING_OPT_DIS);

		/* Wa_18018781329:dg2 */
		wa_write_or(wal, RENDER_MOD_CTRL, FORCE_MISS_FTLB);
		wa_write_or(wal, COMP_MOD_CTRL, FORCE_MISS_FTLB);
		wa_write_or(wal, VDBX_MOD_CTRL, FORCE_MISS_FTLB);
		wa_write_or(wal, VEBX_MOD_CTRL, FORCE_MISS_FTLB);
	}

	if (IS_DG2_GRAPHICS_STEP(i915, G11, STEP_A0, STEP_B0)) {
		/* Wa_14013392000:dg2_g11 */
		wa_masked_en(wal, GEN7_ROW_CHICKEN2, GEN12_ENABLE_LARGE_GRF_MODE);

		/* Wa_16011620976:dg2_g11 */
		wa_write_or(wal, LSC_CHICKEN_BIT_0_UDW, DIS_CHAIN_2XSIMD8);
	}

	if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_A0, STEP_B0) ||
	    IS_DG2_GRAPHICS_STEP(i915, G11, STEP_A0, STEP_B0)) {
		/* Wa_14012419201:dg2 */
		wa_masked_en(wal, GEN9_ROW_CHICKEN4,
			     GEN12_DISABLE_HDR_PAST_PAYLOAD_HOLD_FIX);
	}

	if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_B0, STEP_C0) ||
	    IS_DG2_G11(i915)) {
		/*
		 * Wa_22012826095:dg2
		 * Wa_22013059131:dg2
		 */
		wa_write_clr_set(wal, LSC_CHICKEN_BIT_0_UDW,
				 MAXREQS_PER_BANK,
				 REG_FIELD_PREP(MAXREQS_PER_BANK, 2));

		/* Wa_22013059131:dg2 */
		wa_write_or(wal, LSC_CHICKEN_BIT_0,
			    FORCE_1_SUB_MESSAGE_PER_FRAGMENT);
	}

	/* Wa_1308578152:dg2_g10 when first gslice is fused off */
	if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_B0, STEP_C0) &&
	    needs_wa_1308578152(engine)) {
		wa_masked_dis(wal, GEN12_CS_DEBUG_MODE1_CCCSUNIT_BE_COMMON,
			      GEN12_REPLAY_MODE_GRANULARITY);
	}

	if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_B0, STEP_FOREVER) ||
	    IS_DG2_G11(i915) || IS_DG2_G12(i915)) {
		/* Wa_22013037850:dg2 */
		wa_write_or(wal, LSC_CHICKEN_BIT_0_UDW,
			    DISABLE_128B_EVICTION_COMMAND_UDW);

		/* Wa_22012856258:dg2 */
		wa_masked_en(wal, GEN7_ROW_CHICKEN2,
			     GEN12_DISABLE_READ_SUPPRESSION);

		/*
		 * Wa_22010960976:dg2
		 * Wa_14013347512:dg2
		 */
		wa_masked_dis(wal, GEN12_HDC_CHICKEN0,
			      LSC_L1_FLUSH_CTL_3D_DATAPORT_FLUSH_EVENTS_MASK);
	}

	if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_A0, STEP_B0)) {
		/*
		 * Wa_1608949956:dg2_g10
		 * Wa_14010198302:dg2_g10
		 */
		wa_masked_en(wal, GEN8_ROW_CHICKEN,
			     MDQ_ARBITRATION_MODE | UGM_BACKUP_MODE);
		/*
		 * Wa_14010918519:dg2_g10
		 *
		 * LSC_CHICKEN_BIT_0 always reads back as 0 on this stepping,
		 * so skip verification.
		 */
		wa_add(wal, LSC_CHICKEN_BIT_0_UDW, 0,
		       FORCE_SLM_FENCE_SCOPE_TO_TILE | FORCE_UGM_FENCE_SCOPE_TO_TILE,
		       0, false);
	}

	if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_A0, STEP_B0)) {
		/* Wa_22010430635:dg2 */
		wa_masked_en(wal,
			     GEN9_ROW_CHICKEN4,
			     GEN12_DISABLE_GRF_CLEAR);

		/* Wa_14010648519:dg2 */
		wa_write_or(wal, XEHP_L3NODEARBCFG, XEHP_LNESPARE);
	}

	if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_A0, STEP_C0) ||
	    IS_DG2_G11(i915)) {
		/* Wa_22012654132:dg2 */
		wa_add(wal, GEN10_CACHE_MODE_SS, 0,
		       _MASKED_BIT_ENABLE(ENABLE_PREFETCH_INTO_IC),
		       0 /* write-only, so skip validation */,
		       true);
	}

	/* Wa_14013202645:dg2 */
	if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_B0, STEP_C0) ||
	    IS_DG2_GRAPHICS_STEP(i915, G11, STEP_A0, STEP_B0))
		wa_write_or(wal, RT_CTRL, DIS_NULL_QUERY);

	/* Wa_22012532006:dg2 */
	if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_A0, STEP_C0) ||
	    IS_DG2_GRAPHICS_STEP(engine->i915, G11, STEP_A0, STEP_B0))
		wa_masked_en(wal, GEN9_HALF_SLICE_CHICKEN7,
			     DG2_DISABLE_ROUND_ENABLE_ALLOW_FOR_SSLA);

	if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_A0, STEP_B0)) {
		/* Wa_14010680813:dg2_g10 */
		wa_write_or(wal, GEN12_GAMSTLB_CTRL, CONTROL_BLOCK_CLKGATE_DIS |
			    EGRESS_BLOCK_CLKGATE_DIS | TAG_BLOCK_CLKGATE_DIS);
	}

	if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_A0, STEP_B0) ||
	    IS_DG2_GRAPHICS_STEP(engine->i915, G11, STEP_A0, STEP_B0)) {
		/* Wa_14012362059:dg2 */
		wa_write_or(wal, GEN12_MERT_MOD_CTRL, FORCE_MISS_FTLB);
	}

	if (IS_DG1_GRAPHICS_STEP(i915, STEP_A0, STEP_B0) ||
	    IS_TGL_UY_GRAPHICS_STEP(i915, STEP_A0, STEP_B0)) {
		/*
		 * Wa_1607138336:tgl[a0],dg1[a0]
		 * Wa_1607063988:tgl[a0],dg1[a0]
		 */
		wa_write_or(wal,
			    GEN9_CTX_PREEMPT_REG,
			    GEN12_DISABLE_POSH_BUSY_FF_DOP_CG);
	}

	if (IS_TGL_UY_GRAPHICS_STEP(i915, STEP_A0, STEP_B0)) {
		/*
		 * Wa_1606679103:tgl
		 * (see also Wa_1606682166:icl)
		 */
		wa_write_or(wal,
			    GEN7_SARCHKMD,
			    GEN7_DISABLE_SAMPLER_PREFETCH);
	}

	if (IS_ALDERLAKE_P(i915) || IS_ALDERLAKE_S(i915) || IS_DG1(i915) ||
	    IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
		/* Wa_1606931601:tgl,rkl,dg1,adl-s,adl-p */
		wa_masked_en(wal, GEN7_ROW_CHICKEN2, GEN12_DISABLE_EARLY_READ);

		/*
		 * Wa_1407928979:tgl A*
		 * Wa_18011464164:tgl[B0+],dg1[B0+]
		 * Wa_22010931296:tgl[B0+],dg1[B0+]
		 * Wa_14010919138:rkl,dg1,adl-s,adl-p
		 */
		wa_write_or(wal, GEN7_FF_THREAD_MODE,
			    GEN12_FF_TESSELATION_DOP_GATE_DISABLE);
	}

	if (IS_ALDERLAKE_P(i915) || IS_DG2(i915) || IS_ALDERLAKE_S(i915) ||
	    IS_DG1(i915) || IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
		/*
		 * Wa_1606700617:tgl,dg1,adl-p
		 * Wa_22010271021:tgl,rkl,dg1,adl-s,adl-p
		 * Wa_14010826681:tgl,dg1,rkl,adl-p
		 * Wa_18019627453:dg2
		 */
		wa_masked_en(wal,
			     GEN9_CS_DEBUG_MODE1,
			     FF_DOP_CLOCK_GATE_DISABLE);
	}

	if (IS_ALDERLAKE_P(i915) || IS_ALDERLAKE_S(i915) ||
	    IS_DG1_GRAPHICS_STEP(i915, STEP_A0, STEP_B0) ||
	    IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
		/* Wa_1409804808:tgl,rkl,dg1[a0],adl-s,adl-p */
		wa_masked_en(wal, GEN7_ROW_CHICKEN2,
			     GEN12_PUSH_CONST_DEREF_HOLD_DIS);

		/*
		 * Wa_1409085225:tgl
		 * Wa_14010229206:tgl,rkl,dg1[a0],adl-s,adl-p
		 */
		wa_masked_en(wal, GEN9_ROW_CHICKEN4, GEN12_DISABLE_TDL_PUSH);
	}

	if (IS_DG1_GRAPHICS_STEP(i915, STEP_A0, STEP_B0) ||
	    IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
		/*
		 * Wa_1607030317:tgl
		 * Wa_1607186500:tgl
		 * Wa_1607297627:tgl,rkl,dg1[a0]
		 *
		 * On TGL and RKL there are multiple entries for this WA in the
		 * BSpec; some indicate this is an A0-only WA, others indicate
		 * it applies to all steppings so we trust the "all steppings."
		 * For DG1 this only applies to A0.
		 */
		wa_masked_en(wal,
			     RING_PSMI_CTL(RENDER_RING_BASE),
			     GEN12_WAIT_FOR_EVENT_POWER_DOWN_DISABLE |
			     GEN8_RC_SEMA_IDLE_MSG_DISABLE);
	}

	if (IS_DG1(i915) || IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915) ||
	    IS_ALDERLAKE_S(i915) || IS_ALDERLAKE_P(i915)) {
		/* Wa_1406941453:tgl,rkl,dg1,adl-s,adl-p */
		wa_masked_en(wal,
			     GEN10_SAMPLER_MODE,
			     ENABLE_SMALLPL);
	}

	if (GRAPHICS_VER(i915) == 11) {
		/* This is not a Wa; enable it for better image quality. */
		wa_masked_en(wal,
			     _3D_CHICKEN3,
			     _3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE);

		/*
		 * Wa_1405543622:icl
		 * Formerly known as WaGAPZPriorityScheme
		 */
		wa_write_or(wal,
			    GEN8_GARBCNTL,
			    GEN11_ARBITRATION_PRIO_ORDER_MASK);

		/*
		 * Wa_1604223664:icl
		 * Formerly known as WaL3BankAddressHashing
		 */
		wa_write_clr_set(wal,
				 GEN8_GARBCNTL,
				 GEN11_HASH_CTRL_EXCL_MASK,
				 GEN11_HASH_CTRL_EXCL_BIT0);
		wa_write_clr_set(wal,
				 GEN11_GLBLINVL,
				 GEN11_BANK_HASH_ADDR_EXCL_MASK,
				 GEN11_BANK_HASH_ADDR_EXCL_BIT0);

		/*
		 * Wa_1405733216:icl
		 * Formerly known as WaDisableCleanEvicts
		 */
		wa_write_or(wal,
			    GEN8_L3SQCREG4,
			    GEN11_LQSC_CLEAN_EVICT_DISABLE);

		/* Wa_1606682166:icl */
		wa_write_or(wal,
			    GEN7_SARCHKMD,
			    GEN7_DISABLE_SAMPLER_PREFETCH);

		/* Wa_1409178092:icl */
		wa_write_clr_set(wal,
				 GEN11_SCRATCH2,
				 GEN11_COHERENT_PARTIAL_WRITE_MERGE_ENABLE,
				 0);

		/* WaEnable32PlaneMode:icl */
		wa_masked_en(wal, GEN9_CSFE_CHICKEN1_RCS,
			     GEN11_ENABLE_32_PLANE_MODE);

		/*
		 * Wa_1408615072:icl,ehl (vsunit)
		 * Wa_1407596294:icl,ehl (hsunit)
		 */
		wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE,
			    VSUNIT_CLKGATE_DIS | HSUNIT_CLKGATE_DIS);

		/*
		 * Wa_1408767742:icl[a2..forever],ehl[all]
		 * Wa_1605460711:icl[a0..c0]
		 */
		wa_write_or(wal,
			    GEN7_FF_THREAD_MODE,
			    GEN12_FF_TESSELATION_DOP_GATE_DISABLE);

		/* Wa_22010271021 */
		wa_masked_en(wal,
			     GEN9_CS_DEBUG_MODE1,
			     FF_DOP_CLOCK_GATE_DISABLE);
	}

	if (HAS_PERCTX_PREEMPT_CTRL(i915)) {
		/* FtrPerCtxtPreemptionGranularityControl:skl,bxt,kbl,cfl,cnl,icl,tgl */
		wa_masked_en(wal,
			     GEN7_FF_SLICE_CS_CHICKEN1,
			     GEN9_FFSC_PERCTX_PREEMPT_CTRL);
	}

	if (IS_SKYLAKE(i915) ||
	    IS_KABYLAKE(i915) ||
	    IS_COFFEELAKE(i915) ||
	    IS_COMETLAKE(i915)) {
		/* WaEnableGapsTsvCreditFix:skl,kbl,cfl */
		wa_write_or(wal,
			    GEN8_GARBCNTL,
			    GEN9_GAPS_TSV_CREDIT_DISABLE);
	}

	if (IS_BROXTON(i915)) {
		/* WaDisablePooledEuLoadBalancingFix:bxt */
		wa_masked_en(wal,
			     FF_SLICE_CS_CHICKEN2,
			     GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE);
	}

	if (GRAPHICS_VER(i915) == 9) {
		/* WaContextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk,cfl */
		wa_masked_en(wal,
			     GEN9_CSFE_CHICKEN1_RCS,
			     GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE);

		/* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl,glk,cfl */
		wa_write_or(wal,
			    BDW_SCRATCH1,
			    GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);

		/* WaProgramL3SqcReg1DefaultForPerf:bxt,glk */
		if (IS_GEN9_LP(i915))
			wa_write_clr_set(wal,
					 GEN8_L3SQCREG1,
					 L3_PRIO_CREDITS_MASK,
					 L3_GENERAL_PRIO_CREDITS(62) |
					 L3_HIGH_PRIO_CREDITS(2));

		/* WaOCLCoherentLineFlush:skl,bxt,kbl,cfl */
		wa_write_or(wal,
			    GEN8_L3SQCREG4,
			    GEN8_LQSC_FLUSH_COHERENT_LINES);

		/* Disable atomics in L3 to prevent unrecoverable hangs */
		wa_write_clr_set(wal, GEN9_SCRATCH_LNCF1,
				 GEN9_LNCF_NONIA_COHERENT_ATOMICS_ENABLE, 0);
		wa_write_clr_set(wal, GEN8_L3SQCREG4,
				 GEN8_LQSQ_NONIA_COHERENT_ATOMICS_ENABLE, 0);
		wa_write_clr_set(wal, GEN9_SCRATCH1,
				 EVICTION_PERF_FIX_ENABLE, 0);
	}

	if (IS_HASWELL(i915)) {
		/* WaSampleCChickenBitEnable:hsw */
		wa_masked_en(wal,
			     HALF_SLICE_CHICKEN3, HSW_SAMPLE_C_PERFORMANCE);

		wa_masked_dis(wal,
			      CACHE_MODE_0_GEN7,
			      /* enable HiZ Raw Stall Optimization */
			      HIZ_RAW_STALL_OPT_DISABLE);
	}

	if (IS_VALLEYVIEW(i915)) {
		/* WaDisableEarlyCull:vlv */
		wa_masked_en(wal,
			     _3D_CHICKEN3,
			     _3D_CHICKEN_SF_DISABLE_OBJEND_CULL);

		/*
		 * WaVSThreadDispatchOverride:ivb,vlv
		 *
		 * This actually overrides the dispatch
		 * mode for all thread types.
		 */
		wa_write_clr_set(wal,
				 GEN7_FF_THREAD_MODE,
				 GEN7_FF_SCHED_MASK,
				 GEN7_FF_TS_SCHED_HW |
				 GEN7_FF_VS_SCHED_HW |
				 GEN7_FF_DS_SCHED_HW);

		/* WaPsdDispatchEnable:vlv */
		/* WaDisablePSDDualDispatchEnable:vlv */
		wa_masked_en(wal,
			     GEN7_HALF_SLICE_CHICKEN1,
			     GEN7_MAX_PS_THREAD_DEP |
			     GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE);
	}

	if (IS_IVYBRIDGE(i915)) {
		/* WaDisableEarlyCull:ivb */
		wa_masked_en(wal,
			     _3D_CHICKEN3,
			     _3D_CHICKEN_SF_DISABLE_OBJEND_CULL);

		if (0) { /* causes HiZ corruption on ivb:gt1 */
			/* enable HiZ Raw Stall Optimization */
			wa_masked_dis(wal,
				      CACHE_MODE_0_GEN7,
				      HIZ_RAW_STALL_OPT_DISABLE);
		}

		/*
		 * WaVSThreadDispatchOverride:ivb,vlv
		 *
		 * This actually overrides the dispatch
		 * mode for all thread types.
		 */
		wa_write_clr_set(wal,
				 GEN7_FF_THREAD_MODE,
				 GEN7_FF_SCHED_MASK,
				 GEN7_FF_TS_SCHED_HW |
				 GEN7_FF_VS_SCHED_HW |
				 GEN7_FF_DS_SCHED_HW);

		/* WaDisablePSDDualDispatchEnable:ivb */
		if (IS_IVB_GT1(i915))
			wa_masked_en(wal,
				     GEN7_HALF_SLICE_CHICKEN1,
				     GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE);
	}

	if (GRAPHICS_VER(i915) == 7) {
		/* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
		wa_masked_en(wal,
			     RING_MODE_GEN7(RENDER_RING_BASE),
			     GFX_TLB_INVALIDATE_EXPLICIT | GFX_REPLAY_MODE);

		/* WaDisable_RenderCache_OperationalFlush:ivb,vlv,hsw */
		wa_masked_dis(wal, CACHE_MODE_0_GEN7, RC_OP_FLUSH_ENABLE);

		/*
		 * BSpec says this must be set, even though
		 * WaDisable4x2SubspanOptimization:ivb,hsw
		 * WaDisable4x2SubspanOptimization isn't listed for VLV.
		 */
		wa_masked_en(wal,
			     CACHE_MODE_1,
			     PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);

		/*
		 * BSpec recommends 8x4 when MSAA is used,
		 * however in practice 16x4 seems fastest.
		 *
		 * Note that PS/WM thread counts depend on the WIZ hashing
		 * disable bit, which we don't touch here, but it's good
		 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
		 */
		wa_masked_field_set(wal,
				    GEN7_GT_MODE,
				    GEN6_WIZ_HASHING_MASK,
				    GEN6_WIZ_HASHING_16x4);
	}

	if (IS_GRAPHICS_VER(i915, 6, 7))
		/*
		 * We need to disable the AsyncFlip performance optimisations in
		 * order to use MI_WAIT_FOR_EVENT within the CS. It should
		 * already be programmed to '1' on all products.
		 *
		 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
		 */
		wa_masked_en(wal,
			     RING_MI_MODE(RENDER_RING_BASE),
			     ASYNC_FLIP_PERF_DISABLE);

	if (GRAPHICS_VER(i915) == 6) {
		/*
		 * Required for the hardware to program scanline values for
		 * waiting
		 * WaEnableFlushTlbInvalidationMode:snb
		 */
		wa_masked_en(wal,
			     GFX_MODE,
			     GFX_TLB_INVALIDATE_EXPLICIT);

		/* WaDisableHiZPlanesWhenMSAAEnabled:snb */
		wa_masked_en(wal,
			     _3D_CHICKEN,
			     _3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB);

		wa_masked_en(wal,
			     _3D_CHICKEN3,
			     /* WaStripsFansDisableFastClipPerformanceFix:snb */
			     _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL |
			     /*
			      * Bspec says:
			      * "This bit must be set if 3DSTATE_CLIP clip mode is set
			      * to normal and 3DSTATE_SF number of SF output attributes
			      * is more than 16."
			      */
			     _3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH);

		/*
		 * BSpec recommends 8x4 when MSAA is used,
		 * however in practice 16x4 seems fastest.
		 *
		 * Note that PS/WM thread counts depend on the WIZ hashing
		 * disable bit, which we don't touch here, but it's good
		 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
		 */
		wa_masked_field_set(wal,
				    GEN6_GT_MODE,
				    GEN6_WIZ_HASHING_MASK,
				    GEN6_WIZ_HASHING_16x4);

		/* WaDisable_RenderCache_OperationalFlush:snb */
		wa_masked_dis(wal, CACHE_MODE_0, RC_OP_FLUSH_ENABLE);

		/*
		 * From the Sandybridge PRM, volume 1 part 3, page 24:
		 * "If this bit is set, STCunit will have LRA as replacement
		 *  policy. [...] This bit must be reset. LRA replacement
		 *  policy is not supported."
		 */
		wa_masked_dis(wal,
			      CACHE_MODE_0,
			      CM0_STC_EVICT_DISABLE_LRA_SNB);
	}

	if (IS_GRAPHICS_VER(i915, 4, 6))
		/* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
		wa_add(wal, RING_MI_MODE(RENDER_RING_BASE),
		       0, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH),
		       /* XXX bit doesn't stick on Broadwater */
		       IS_I965G(i915) ? 0 : VS_TIMER_DISPATCH, true);

	if (GRAPHICS_VER(i915) == 4)
		/*
		 * Disable CONSTANT_BUFFER before it is loaded from the context
		 * image. For as it is loaded, it is executed and the stored
		 * address may no longer be valid, leading to a GPU hang.
		 *
		 * This imposes the requirement that userspace reload their
		 * CONSTANT_BUFFER on every batch, fortunately a requirement
		 * they are already accustomed to from before contexts were
		 * enabled.
		 */
		wa_add(wal, ECOSKPD(RENDER_RING_BASE),
		       0, _MASKED_BIT_ENABLE(ECO_CONSTANT_BUFFER_SR_DISABLE),
		       0 /* XXX bit doesn't stick on Broadwater */,
		       true);
}

static void
xcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = engine->i915;

	/* WaKBLVECSSemaphoreWaitPoll:kbl */
	if (IS_KBL_GRAPHICS_STEP(i915, STEP_A0, STEP_F0)) {
		wa_write(wal,
			 RING_SEMA_WAIT_POLL(engine->mmio_base),
			 1);
	}
}

/*
 * The workarounds in this function apply to shared registers in
 * the general render reset domain that aren't tied to a
 * specific engine. Since all render+compute engines get reset
 * together, and the contents of these registers are lost during
 * the shared render domain reset, we'll define such workarounds
 * here and then add them to just a single RCS or CCS engine's
 * workaround list (whichever engine has the
 * I915_ENGINE_FIRST_RENDER_COMPUTE flag).
 */
static void
general_render_compute_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = engine->i915;

	if (IS_XEHPSDV(i915)) {
		/* Wa_1409954639 */
		wa_masked_en(wal,
			     GEN8_ROW_CHICKEN,
			     SYSTOLIC_DOP_CLOCK_GATING_DIS);

		/* Wa_1607196519 */
		wa_masked_en(wal,
			     GEN9_ROW_CHICKEN4,
			     GEN12_DISABLE_GRF_CLEAR);

		/* Wa_14010670810:xehpsdv */
		wa_write_or(wal, XEHP_L3NODEARBCFG, XEHP_LNESPARE);

		/* Wa_14010449647:xehpsdv */
		wa_masked_en(wal, GEN7_HALF_SLICE_CHICKEN1,
			     GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE);

		/* Wa_18011725039:xehpsdv */
		if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_A1, STEP_B0)) {
			wa_masked_dis(wal, MLTICTXCTL, TDONRENDER);
			wa_write_or(wal, L3SQCREG1_CCS0, FLUSHALLNONCOH);
		}

		/* Wa_14012362059:xehpsdv */
		wa_write_or(wal, GEN12_MERT_MOD_CTRL, FORCE_MISS_FTLB);

		/* Wa_14014368820:xehpsdv */
		wa_write_or(wal, GEN12_GAMCNTRL_CTRL, INVALIDATION_BROADCAST_MODE_DIS |
			    GLOBAL_INVALIDATION_MODE);
	}

	if (IS_DG2(i915)) {
		/* Wa_22014226127:dg2 */
		wa_write_or(wal, LSC_CHICKEN_BIT_0, DISABLE_D8_D16_COASLESCE);
	}
}

static void
engine_init_workarounds(struct intel_engine_cs *engine, struct i915_wa_list *wal)
{
	if (I915_SELFTEST_ONLY(GRAPHICS_VER(engine->i915) < 4))
		return;

	engine_fake_wa_init(engine, wal);

	/*
	 * These are common workarounds that just need to be applied
	 * to a single RCS/CCS engine's workaround list since
	 * they're reset as part of the general render domain reset.
	 */
	if (engine->flags & I915_ENGINE_FIRST_RENDER_COMPUTE)
		general_render_compute_wa_init(engine, wal);

	if (engine->class == RENDER_CLASS)
		rcs_engine_wa_init(engine, wal);
	else
		xcs_engine_wa_init(engine, wal);
}

void intel_engine_init_workarounds(struct intel_engine_cs *engine)
{
	struct i915_wa_list *wal = &engine->wa_list;

	if (GRAPHICS_VER(engine->i915) < 4)
		return;

	wa_init_start(wal, "engine", engine->name);
	engine_init_workarounds(engine, wal);
	wa_init_finish(wal);
}

void intel_engine_apply_workarounds(struct intel_engine_cs *engine)
{
	wa_list_apply(engine->gt, &engine->wa_list);
}

static const struct i915_range mcr_ranges_gen8[] = {
	{ .start = 0x5500, .end = 0x55ff },
	{ .start = 0x7000, .end = 0x7fff },
	{ .start = 0x9400, .end = 0x97ff },
	{ .start = 0xb000, .end = 0xb3ff },
	{ .start = 0xe000, .end = 0xe7ff },
	{},
};

static const struct i915_range mcr_ranges_gen12[] = {
	{ .start = 0x8150, .end = 0x815f },
	{ .start = 0x9520, .end = 0x955f },
	{ .start = 0xb100, .end = 0xb3ff },
	{ .start = 0xde80, .end = 0xe8ff },
	{ .start = 0x24a00, .end = 0x24a7f },
	{},
};

static const struct i915_range mcr_ranges_xehp[] = {
	{ .start = 0x4000, .end = 0x4aff },
	{ .start = 0x5200, .end = 0x52ff },
	{ .start = 0x5400, .end = 0x7fff },
	{ .start = 0x8140, .end = 0x815f },
	{ .start = 0x8c80, .end = 0x8dff },
	{ .start = 0x94d0, .end = 0x955f },
	{ .start = 0x9680, .end = 0x96ff },
	{ .start = 0xb000, .end = 0xb3ff },
	{ .start = 0xc800, .end = 0xcfff },
	{ .start = 0xd800, .end = 0xd8ff },
	{ .start = 0xdc00, .end = 0xffff },
	{ .start = 0x17000, .end = 0x17fff },
	{ .start = 0x24a00, .end = 0x24a7f },
	{},
};

static bool mcr_range(struct drm_i915_private *i915, u32 offset)
{
	const struct i915_range *mcr_ranges;
	int i;

	if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50))
		mcr_ranges = mcr_ranges_xehp;
	else if (GRAPHICS_VER(i915) >= 12)
		mcr_ranges = mcr_ranges_gen12;
	else if (GRAPHICS_VER(i915) >= 8)
		mcr_ranges = mcr_ranges_gen8;
	else
		return false;

	/*
	 * Registers in these ranges are affected by the MCR selector
	 * which only controls CPU initiated MMIO. Routing does not
	 * work for CS access so we cannot verify them on this path.
	 */
	for (i = 0; mcr_ranges[i].start; i++)
		if (offset >= mcr_ranges[i].start &&
		    offset <= mcr_ranges[i].end)
			return true;

	return false;
}

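/*
 * Emit one MI_STORE_REGISTER_MEM per (non-MCR) workaround register,
 * dumping each value into the scratch buffer at a fixed per-entry
 * offset. On gen8+ the command length is one dword larger to carry the
 * upper half of the 64-bit GGTT address, hence the srm++ below.
 */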
static int
wa_list_srm(struct i915_request *rq,
	    const struct i915_wa_list *wal,
	    struct i915_vma *vma)
{
	struct drm_i915_private *i915 = rq->engine->i915;
	unsigned int i, count = 0;
	const struct i915_wa *wa;
	u32 srm, *cs;

	srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
	if (GRAPHICS_VER(i915) >= 8)
		srm++;

	for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
		if (!mcr_range(i915, i915_mmio_reg_offset(wa->reg)))
			count++;
	}

	cs = intel_ring_begin(rq, 4 * count);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
		u32 offset = i915_mmio_reg_offset(wa->reg);

		if (mcr_range(i915, offset))
			continue;

		*cs++ = srm;
		*cs++ = offset;
		*cs++ = i915_ggtt_offset(vma) + sizeof(u32) * i;
		*cs++ = 0;
	}
	intel_ring_advance(rq, cs);

	return 0;
}

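/*
 * Unlike wa_list_verify(), which samples the registers from the CPU,
 * this path reads them from the command streamer itself: a request
 * stores every register into a scratch buffer via wa_list_srm() and the
 * results are checked once the request completes. MCR registers are
 * skipped since CS reads are not steered, and the ww loop retries the
 * whole pin sequence after an -EDEADLK backoff.
 */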
static int engine_wa_list_verify(struct intel_context *ce,
				 const struct i915_wa_list * const wal,
				 const char *from)
{
	const struct i915_wa *wa;
	struct i915_request *rq;
	struct i915_vma *vma;
	struct i915_gem_ww_ctx ww;
	unsigned int i;
	u32 *results;
	int err;

	if (!wal->count)
		return 0;

	vma = __vm_create_scratch_for_read(&ce->engine->gt->ggtt->vm,
					   wal->count * sizeof(u32));
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	intel_engine_pm_get(ce->engine);
	i915_gem_ww_ctx_init(&ww, false);
retry:
	err = i915_gem_object_lock(vma->obj, &ww);
	if (err == 0)
		err = intel_context_pin_ww(ce, &ww);
	if (err)
		goto err_pm;

	err = i915_vma_pin_ww(vma, &ww, 0, 0,
			      i915_vma_is_ggtt(vma) ? PIN_GLOBAL : PIN_USER);
	if (err)
		goto err_unpin;

	rq = i915_request_create(ce);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_vma;
	}

	err = i915_request_await_object(rq, vma->obj, true);
	if (err == 0)
		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	if (err == 0)
		err = wa_list_srm(rq, wal, vma);

	i915_request_get(rq);
	if (err)
		i915_request_set_error_once(rq, err);
	i915_request_add(rq);

	if (err)
		goto err_rq;

	if (i915_request_wait(rq, 0, HZ / 5) < 0) {
		err = -ETIME;
		goto err_rq;
	}

	results = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
	if (IS_ERR(results)) {
		err = PTR_ERR(results);
		goto err_rq;
	}

	err = 0;
	for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
		if (mcr_range(rq->engine->i915, i915_mmio_reg_offset(wa->reg)))
			continue;

		if (!wa_verify(wa, results[i], wal->name, from))
			err = -ENXIO;
	}

	i915_gem_object_unpin_map(vma->obj);

err_rq:
	i915_request_put(rq);
err_vma:
	i915_vma_unpin(vma);
err_unpin:
	intel_context_unpin(ce);
err_pm:
	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
	intel_engine_pm_put(ce->engine);
	i915_vma_put(vma);
	return err;
}

int intel_engine_verify_workarounds(struct intel_engine_cs *engine,
				    const char *from)
{
	return engine_wa_list_verify(engine->kernel_context,
				     &engine->wa_list,
				     from);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_workarounds.c"
#endif