// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include <drm/drm_managed.h>
#include <drm/intel-gtt.h>

#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_lmem.h"
#include "pxp/intel_pxp.h"

#include "i915_drv.h"
#include "i915_perf_oa_regs.h"
#include "intel_context.h"
#include "intel_engine_pm.h"
#include "intel_engine_regs.h"
#include "intel_ggtt_gmch.h"
#include "intel_gt.h"
#include "intel_gt_buffer_pool.h"
#include "intel_gt_clock_utils.h"
#include "intel_gt_debugfs.h"
#include "intel_gt_mcr.h"
#include "intel_gt_pm.h"
#include "intel_gt_regs.h"
#include "intel_gt_requests.h"
#include "intel_migrate.h"
#include "intel_mocs.h"
#include "intel_pci_config.h"
#include "intel_pm.h"
#include "intel_rc6.h"
#include "intel_renderstate.h"
#include "intel_rps.h"
#include "intel_sa_media.h"
#include "intel_gt_sysfs.h"
#include "intel_uncore.h"
#include "shmem_utils.h"

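/*
 * Initialisation shared by the root GT and any additional GTs, run before
 * MMIO access has been set up: locks and lists, the TLB invalidation
 * seqcount, and early init of the buffer pool, reset, request, timeline,
 * PM, uC and RPS state.
 */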
void intel_gt_common_init_early(struct intel_gt *gt)
{
	spin_lock_init(gt->irq_lock);

	INIT_LIST_HEAD(&gt->closed_vma);
	spin_lock_init(&gt->closed_lock);

	init_llist_head(&gt->watchdog.list);
	INIT_WORK(&gt->watchdog.work, intel_gt_watchdog_work);

	intel_gt_init_buffer_pool(gt);
	intel_gt_init_reset(gt);
	intel_gt_init_requests(gt);
	intel_gt_init_timelines(gt);
	mutex_init(&gt->tlb.invalidate_lock);
	seqcount_mutex_init(&gt->tlb.seqno, &gt->tlb.invalidate_lock);
	intel_gt_pm_init_early(gt);

	intel_uc_init_early(&gt->uc);
	intel_rps_init_early(&gt->rps);
}

/* Preliminary initialization of Tile 0 */
int intel_root_gt_init_early(struct drm_i915_private *i915)
{
	struct intel_gt *gt = to_gt(i915);

	gt->i915 = i915;
	gt->uncore = &i915->uncore;
	gt->irq_lock = drmm_kzalloc(&i915->drm, sizeof(*gt->irq_lock), GFP_KERNEL);
	if (!gt->irq_lock)
		return -ENOMEM;

	intel_gt_common_init_early(gt);

	return 0;
}

static int intel_gt_probe_lmem(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	unsigned int instance = gt->info.id;
	int id = INTEL_REGION_LMEM_0 + instance;
	struct intel_memory_region *mem;
	int err;

	mem = intel_gt_setup_lmem(gt);
	if (IS_ERR(mem)) {
		err = PTR_ERR(mem);
		if (err == -ENODEV)
			return 0;

		drm_err(&i915->drm,
			"Failed to setup region(%d) type=%d\n",
			err, INTEL_MEMORY_LOCAL);
		return err;
	}

	mem->id = id;
	mem->instance = instance;

	intel_memory_region_set_name(mem, "local%u", mem->instance);

	GEM_BUG_ON(!HAS_REGION(i915, id));
	GEM_BUG_ON(i915->mm.regions[id]);
	i915->mm.regions[id] = mem;

	return 0;
}

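/* Allocate the drm-managed GGTT structure that this GT will use. */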
int intel_gt_assign_ggtt(struct intel_gt *gt)
{
	gt->ggtt = drmm_kzalloc(&gt->i915->drm, sizeof(*gt->ggtt), GFP_KERNEL);

	return gt->ggtt ? 0 : -ENOMEM;
}

int intel_gt_init_mmio(struct intel_gt *gt)
{
	intel_gt_init_clock_frequency(gt);

	intel_uc_init_mmio(&gt->uc);
	intel_sseu_info_init(gt);
	intel_gt_mcr_init(gt);

	return intel_engines_init_mmio(gt);
}

static void init_unused_ring(struct intel_gt *gt, u32 base)
{
	struct intel_uncore *uncore = gt->uncore;

	intel_uncore_write(uncore, RING_CTL(base), 0);
	intel_uncore_write(uncore, RING_HEAD(base), 0);
	intel_uncore_write(uncore, RING_TAIL(base), 0);
	intel_uncore_write(uncore, RING_START(base), 0);
}

static void init_unused_rings(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;

	if (IS_I830(i915)) {
		init_unused_ring(gt, PRB1_BASE);
		init_unused_ring(gt, SRB0_BASE);
		init_unused_ring(gt, SRB1_BASE);
		init_unused_ring(gt, SRB2_BASE);
		init_unused_ring(gt, SRB3_BASE);
	} else if (GRAPHICS_VER(i915) == 2) {
		init_unused_ring(gt, SRB0_BASE);
		init_unused_ring(gt, SRB1_BASE);
	} else if (GRAPHICS_VER(i915) == 3) {
		init_unused_ring(gt, PRB1_BASE);
		init_unused_ring(gt, PRB2_BASE);
	}
}

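/*
 * Bring up the GT hardware: apply and verify the GT workarounds, set up
 * swizzling, quiesce the unused legacy rings, enable PPGTT, load the
 * GuC/HuC firmware and program the MOCS tables.
 */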
int intel_gt_init_hw(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_uncore *uncore = gt->uncore;
	int ret;

	gt->last_init_time = ktime_get();

	/* Double layer security blanket, see i915_gem_init() */
	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);

	if (HAS_EDRAM(i915) && GRAPHICS_VER(i915) < 9)
		intel_uncore_rmw(uncore, HSW_IDICR, 0, IDIHASHMSK(0xf));

	if (IS_HASWELL(i915))
		intel_uncore_write(uncore,
				   HSW_MI_PREDICATE_RESULT_2,
				   IS_HSW_GT3(i915) ?
				   LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);

	/* Apply the GT workarounds... */
	intel_gt_apply_workarounds(gt);
	/* ...and determine whether they are sticking. */
	intel_gt_verify_workarounds(gt, "init");

	intel_gt_init_swizzling(gt);

	/*
	 * At least 830 can leave some of the unused rings
	 * "active" (ie. head != tail) after resume which
	 * will prevent c3 entry. Make sure all unused rings
	 * are totally idle.
	 */
	init_unused_rings(gt);

	ret = i915_ppgtt_init_hw(gt);
	if (ret) {
		DRM_ERROR("Enabling PPGTT failed (%d)\n", ret);
		goto out;
	}

	/* We can't enable contexts until all firmware is loaded */
	ret = intel_uc_init_hw(&gt->uc);
	if (ret) {
		i915_probe_error(i915, "Enabling uc failed (%d)\n", ret);
		goto out;
	}

	intel_mocs_init(gt);

out:
	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
	return ret;
}

static void rmw_set(struct intel_uncore *uncore, i915_reg_t reg, u32 set)
{
	intel_uncore_rmw(uncore, reg, 0, set);
}

static void rmw_clear(struct intel_uncore *uncore, i915_reg_t reg, u32 clr)
{
	intel_uncore_rmw(uncore, reg, clr, 0);
}

static void clear_register(struct intel_uncore *uncore, i915_reg_t reg)
{
	intel_uncore_rmw(uncore, reg, 0, 0);
}

static void gen6_clear_engine_error_register(struct intel_engine_cs *engine)
{
	GEN6_RING_FAULT_REG_RMW(engine, RING_FAULT_VALID, 0);
	GEN6_RING_FAULT_REG_POSTING_READ(engine);
}

void
intel_gt_clear_error_registers(struct intel_gt *gt,
			       intel_engine_mask_t engine_mask)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_uncore *uncore = gt->uncore;
	u32 eir;

	if (GRAPHICS_VER(i915) != 2)
		clear_register(uncore, PGTBL_ER);

	if (GRAPHICS_VER(i915) < 4)
		clear_register(uncore, IPEIR(RENDER_RING_BASE));
	else
		clear_register(uncore, IPEIR_I965);

	clear_register(uncore, EIR);
	eir = intel_uncore_read(uncore, EIR);
	if (eir) {
		/*
		 * some errors might have become stuck,
		 * mask them.
		 */
		DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir);
		rmw_set(uncore, EMR, eir);
		intel_uncore_write(uncore, GEN2_IIR,
				   I915_MASTER_ERROR_INTERRUPT);
	}

	if (GRAPHICS_VER(i915) >= 12) {
		rmw_clear(uncore, GEN12_RING_FAULT_REG, RING_FAULT_VALID);
		intel_uncore_posting_read(uncore, GEN12_RING_FAULT_REG);
	} else if (GRAPHICS_VER(i915) >= 8) {
		rmw_clear(uncore, GEN8_RING_FAULT_REG, RING_FAULT_VALID);
		intel_uncore_posting_read(uncore, GEN8_RING_FAULT_REG);
	} else if (GRAPHICS_VER(i915) >= 6) {
		struct intel_engine_cs *engine;
		enum intel_engine_id id;

		for_each_engine_masked(engine, gt, engine_mask, id)
			gen6_clear_engine_error_register(engine);
	}
}

static void gen6_check_faults(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	u32 fault;

	for_each_engine(engine, gt, id) {
		fault = GEN6_RING_FAULT_REG_READ(engine);
		if (fault & RING_FAULT_VALID) {
			drm_dbg(&engine->i915->drm, "Unexpected fault\n"
				"\tAddr: 0x%08lx\n"
				"\tAddress space: %s\n"
				"\tSource ID: %d\n"
				"\tType: %d\n",
				fault & PAGE_MASK,
				fault & RING_FAULT_GTTSEL_MASK ?
				"GGTT" : "PPGTT",
				RING_FAULT_SRCID(fault),
				RING_FAULT_FAULT_TYPE(fault));
		}
	}
}

static void gen8_check_faults(struct intel_gt *gt)
{
	struct intel_uncore *uncore = gt->uncore;
	i915_reg_t fault_reg, fault_data0_reg, fault_data1_reg;
	u32 fault;

	if (GRAPHICS_VER(gt->i915) >= 12) {
		fault_reg = GEN12_RING_FAULT_REG;
		fault_data0_reg = GEN12_FAULT_TLB_DATA0;
		fault_data1_reg = GEN12_FAULT_TLB_DATA1;
	} else {
		fault_reg = GEN8_RING_FAULT_REG;
		fault_data0_reg = GEN8_FAULT_TLB_DATA0;
		fault_data1_reg = GEN8_FAULT_TLB_DATA1;
	}

	fault = intel_uncore_read(uncore, fault_reg);
	if (fault & RING_FAULT_VALID) {
		u32 fault_data0, fault_data1;
		u64 fault_addr;

		fault_data0 = intel_uncore_read(uncore, fault_data0_reg);
		fault_data1 = intel_uncore_read(uncore, fault_data1_reg);

		fault_addr = ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) |
			     ((u64)fault_data0 << 12);

		drm_dbg(&uncore->i915->drm, "Unexpected fault\n"
			"\tAddr: 0x%08x_%08x\n"
			"\tAddress space: %s\n"
			"\tEngine ID: %d\n"
			"\tSource ID: %d\n"
			"\tType: %d\n",
			upper_32_bits(fault_addr), lower_32_bits(fault_addr),
			fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT",
			GEN8_RING_FAULT_ENGINE_ID(fault),
			RING_FAULT_SRCID(fault),
			RING_FAULT_FAULT_TYPE(fault));
	}
}

void intel_gt_check_and_clear_faults(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;

	/* From GEN8 onwards we only have one 'All Engine Fault Register' */
	if (GRAPHICS_VER(i915) >= 8)
		gen8_check_faults(gt);
	else if (GRAPHICS_VER(i915) >= 6)
		gen6_check_faults(gt);
	else
		return;

	intel_gt_clear_error_registers(gt, ALL_ENGINES);
}

void intel_gt_flush_ggtt_writes(struct intel_gt *gt)
{
	struct intel_uncore *uncore = gt->uncore;
	intel_wakeref_t wakeref;

	/*
	 * No actual flushing is required for the GTT write domain for reads
	 * from the GTT domain. Writes to it "immediately" go to main memory
	 * as far as we know, so there's no chipset flush. It also doesn't
	 * land in the GPU render cache.
	 *
	 * However, we do have to enforce the order so that all writes through
	 * the GTT land before any writes to the device, such as updates to
	 * the GATT itself.
	 *
	 * We also have to wait a bit for the writes to land from the GTT.
	 * An uncached read (i.e. mmio) seems to be ideal for the round-trip
	 * timing. This issue has only been observed when switching quickly
	 * between GTT writes and CPU reads from inside the kernel on recent hw,
	 * and it appears to only affect discrete GTT blocks (i.e. on LLC
	 * system agents we cannot reproduce this behaviour, until Cannonlake
	 * that was!).
	 */

	wmb();

	if (INTEL_INFO(gt->i915)->has_coherent_ggtt)
		return;

	intel_gt_chipset_flush(gt);

	with_intel_runtime_pm_if_in_use(uncore->rpm, wakeref) {
		unsigned long flags;

		spin_lock_irqsave(&uncore->lock, flags);
		intel_uncore_posting_read_fw(uncore,
					     RING_HEAD(RENDER_RING_BASE));
		spin_unlock_irqrestore(&uncore->lock, flags);
	}
}

void intel_gt_chipset_flush(struct intel_gt *gt)
{
	wmb();
	if (GRAPHICS_VER(gt->i915) < 6)
		intel_ggtt_gmch_flush();
}

void intel_gt_driver_register(struct intel_gt *gt)
{
	intel_gsc_init(&gt->gsc, gt->i915);

	intel_rps_driver_register(&gt->rps);

	intel_gt_debugfs_register(gt);
	intel_gt_sysfs_register(gt);
}

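/*
 * Allocate and pin a scratch buffer in the GGTT for this GT. Prefer
 * device-local memory, then fall back to stolen memory and finally to
 * internal (shmem-backed) pages.
 */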
static int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size)
{
	struct drm_i915_private *i915 = gt->i915;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int ret;

	obj = i915_gem_object_create_lmem(i915, size,
					  I915_BO_ALLOC_VOLATILE |
					  I915_BO_ALLOC_GPU_ONLY);
	if (IS_ERR(obj))
		obj = i915_gem_object_create_stolen(i915, size);
	if (IS_ERR(obj))
		obj = i915_gem_object_create_internal(i915, size);
	if (IS_ERR(obj)) {
		drm_err(&i915->drm, "Failed to allocate scratch page\n");
		return PTR_ERR(obj);
	}

	vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_unref;
	}

	ret = i915_ggtt_pin(vma, NULL, 0, PIN_HIGH);
	if (ret)
		goto err_unref;

	gt->scratch = i915_vma_make_unshrinkable(vma);

	return 0;

err_unref:
	i915_gem_object_put(obj);
	return ret;
}

static void intel_gt_fini_scratch(struct intel_gt *gt)
{
	i915_vma_unpin_and_release(&gt->scratch, 0);
}

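/*
 * Pick the address space used for the kernel context: a full PPGTT where the
 * hardware supports more than aliasing PPGTT, otherwise the global GTT.
 */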
static struct i915_address_space *kernel_vm(struct intel_gt *gt)
{
	if (INTEL_PPGTT(gt->i915) > INTEL_PPGTT_ALIASING)
		return &i915_ppgtt_create(gt, I915_BO_ALLOC_PM_EARLY)->vm;
	else
		return i915_vm_get(&gt->ggtt->vm);
}

static int __engines_record_defaults(struct intel_gt *gt)
{
	struct i915_request *requests[I915_NUM_ENGINES] = {};
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err = 0;

	/*
	 * As we reset the gpu during very early sanitisation, the current
	 * register state on the GPU should reflect its default values.
	 * We load a context onto the hw (with restore-inhibit), then switch
	 * over to a second context to save that default register state. We
	 * can then prime every new context with that state so they all start
	 * from the same default HW values.
	 */

	for_each_engine(engine, gt, id) {
		struct intel_renderstate so;
		struct intel_context *ce;
		struct i915_request *rq;

		/* We must be able to switch to something! */
		GEM_BUG_ON(!engine->kernel_context);

		ce = intel_context_create(engine);
		if (IS_ERR(ce)) {
			err = PTR_ERR(ce);
			goto out;
		}

		err = intel_renderstate_init(&so, ce);
		if (err)
			goto err;

		rq = i915_request_create(ce);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto err_fini;
		}

		err = intel_engine_emit_ctx_wa(rq);
		if (err)
			goto err_rq;

		err = intel_renderstate_emit(&so, rq);
		if (err)
			goto err_rq;

err_rq:
		requests[id] = i915_request_get(rq);
		i915_request_add(rq);
err_fini:
		intel_renderstate_fini(&so, ce);
err:
		if (err) {
			intel_context_put(ce);
			goto out;
		}
	}

	/* Flush the default context image to memory, and enable powersaving. */
	if (intel_gt_wait_for_idle(gt, I915_GEM_IDLE_TIMEOUT) == -ETIME) {
		err = -EIO;
		goto out;
	}

	for (id = 0; id < ARRAY_SIZE(requests); id++) {
		struct i915_request *rq;
		struct file *state;

		rq = requests[id];
		if (!rq)
			continue;

		if (rq->fence.error) {
			err = -EIO;
			goto out;
		}

		GEM_BUG_ON(!test_bit(CONTEXT_ALLOC_BIT, &rq->context->flags));
		if (!rq->context->state)
			continue;

		/* Keep a copy of the state's backing pages; free the obj */
		state = shmem_create_from_object(rq->context->state->obj);
		if (IS_ERR(state)) {
			err = PTR_ERR(state);
			goto out;
		}
		rq->engine->default_state = state;
	}

out:
	/*
	 * If we have to abandon now, we expect the engines to be idle
	 * and ready to be torn-down. The quickest way we can accomplish
	 * this is by declaring ourselves wedged.
	 */
	if (err)
		intel_gt_set_wedged(gt);

	for (id = 0; id < ARRAY_SIZE(requests); id++) {
		struct intel_context *ce;
		struct i915_request *rq;

		rq = requests[id];
		if (!rq)
			continue;

		ce = rq->context;
		i915_request_put(rq);
		intel_context_put(ce);
	}
	return err;
}

static int __engines_verify_workarounds(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err = 0;

	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
		return 0;

	for_each_engine(engine, gt, id) {
		if (intel_engine_verify_workarounds(engine, "load"))
			err = -EIO;
	}

	/* Flush and restore the kernel context for safety */
	if (intel_gt_wait_for_idle(gt, I915_GEM_IDLE_TIMEOUT) == -ETIME)
		err = -EIO;

	return err;
}

static void __intel_gt_disable(struct intel_gt *gt)
{
	intel_gt_set_wedged_on_fini(gt);

	intel_gt_suspend_prepare(gt);
	intel_gt_suspend_late(gt);

	GEM_BUG_ON(intel_gt_pm_is_awake(gt));
}

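/*
 * Retire outstanding requests on the GT (aborting early with -EINTR if a
 * signal is pending) and then wait for the uC to become idle. Returns 0 once
 * the GT is idle, or a negative error code otherwise.
 */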
int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout)
{
	long remaining_timeout;

	/* If the device is asleep, we have no requests outstanding */
	if (!intel_gt_pm_is_awake(gt))
		return 0;

	while ((timeout = intel_gt_retire_requests_timeout(gt, timeout,
							   &remaining_timeout)) > 0) {
		cond_resched();
		if (signal_pending(current))
			return -EINTR;
	}

	if (timeout)
		return timeout;

	if (remaining_timeout < 0)
		remaining_timeout = 0;

	return intel_uc_wait_for_idle(&gt->uc, remaining_timeout);
}

int intel_gt_init(struct intel_gt *gt)
{
	int err;

	err = i915_inject_probe_error(gt->i915, -ENODEV);
	if (err)
		return err;

	intel_gt_init_workarounds(gt);

	/*
	 * This is just a security blanket to placate dragons.
	 * On some systems, we very sporadically observe that the first TLBs
	 * used by the CS may be stale, despite us poking the TLB reset. If
	 * we hold the forcewake during initialisation these problems
	 * just magically go away.
	 */
	intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);

	err = intel_gt_init_scratch(gt,
				    GRAPHICS_VER(gt->i915) == 2 ? SZ_256K : SZ_4K);
	if (err)
		goto out_fw;

	intel_gt_pm_init(gt);

	gt->vm = kernel_vm(gt);
	if (!gt->vm) {
		err = -ENOMEM;
		goto err_pm;
	}

	intel_set_mocs_index(gt);

	err = intel_engines_init(gt);
	if (err)
		goto err_engines;

	err = intel_uc_init(&gt->uc);
	if (err)
		goto err_engines;

	err = intel_gt_resume(gt);
	if (err)
		goto err_uc_init;

	err = intel_gt_init_hwconfig(gt);
	if (err)
		drm_err(&gt->i915->drm, "Failed to retrieve hwconfig table: %pe\n",
			ERR_PTR(err));

	err = __engines_record_defaults(gt);
	if (err)
		goto err_gt;

	err = __engines_verify_workarounds(gt);
	if (err)
		goto err_gt;

	intel_uc_init_late(&gt->uc);

	err = i915_inject_probe_error(gt->i915, -EIO);
	if (err)
		goto err_gt;

	intel_migrate_init(&gt->migrate, gt);

	intel_pxp_init(&gt->pxp);

	goto out_fw;
err_gt:
	__intel_gt_disable(gt);
	intel_uc_fini_hw(&gt->uc);
err_uc_init:
	intel_uc_fini(&gt->uc);
err_engines:
	intel_engines_release(gt);
	i915_vm_put(fetch_and_zero(&gt->vm));
err_pm:
	intel_gt_pm_fini(gt);
	intel_gt_fini_scratch(gt);
out_fw:
	if (err)
		intel_gt_set_wedged_on_init(gt);
	intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
	return err;
}

void intel_gt_driver_remove(struct intel_gt *gt)
{
	__intel_gt_disable(gt);

	intel_migrate_fini(&gt->migrate);
	intel_uc_driver_remove(&gt->uc);

	intel_engines_release(gt);

	intel_gt_flush_buffer_pool(gt);
}

void intel_gt_driver_unregister(struct intel_gt *gt)
{
	intel_wakeref_t wakeref;

	intel_gt_sysfs_unregister(gt);
	intel_rps_driver_unregister(&gt->rps);
	intel_gsc_fini(&gt->gsc);

	intel_pxp_fini(&gt->pxp);

	/*
	 * Upon unregistering the device to prevent any new users, cancel
	 * all in-flight requests so that we can quickly unbind the active
	 * resources.
	 */
	intel_gt_set_wedged_on_fini(gt);

	/* Scrub all HW state upon release */
	with_intel_runtime_pm(gt->uncore->rpm, wakeref)
		__intel_gt_reset(gt, ALL_ENGINES);
}

void intel_gt_driver_release(struct intel_gt *gt)
{
	struct i915_address_space *vm;

	vm = fetch_and_zero(&gt->vm);
	if (vm) /* FIXME being called twice on error paths :( */
		i915_vm_put(vm);

	intel_wa_list_free(&gt->wa_list);
	intel_gt_pm_fini(gt);
	intel_gt_fini_scratch(gt);
	intel_gt_fini_buffer_pool(gt);
	intel_gt_fini_hwconfig(gt);
}

void intel_gt_driver_late_release_all(struct drm_i915_private *i915)
{
	struct intel_gt *gt;
	unsigned int id;

	/* We need to wait for inflight RCU frees to release their grip */
	rcu_barrier();

	for_each_gt(gt, i915, id) {
		intel_uc_driver_late_release(&gt->uc);
		intel_gt_fini_requests(gt);
		intel_gt_fini_reset(gt);
		intel_gt_fini_timelines(gt);
		mutex_destroy(&gt->tlb.invalidate_lock);
		intel_engines_free(gt);
	}
}

static int intel_gt_tile_setup(struct intel_gt *gt, phys_addr_t phys_addr)
{
	int ret;

	if (!gt_is_root(gt)) {
		struct intel_uncore *uncore;
		spinlock_t *irq_lock;

		uncore = drmm_kzalloc(&gt->i915->drm, sizeof(*uncore), GFP_KERNEL);
		if (!uncore)
			return -ENOMEM;

		irq_lock = drmm_kzalloc(&gt->i915->drm, sizeof(*irq_lock), GFP_KERNEL);
		if (!irq_lock)
			return -ENOMEM;

		gt->uncore = uncore;
		gt->irq_lock = irq_lock;

		intel_gt_common_init_early(gt);
	}

	intel_uncore_init_early(gt->uncore, gt);

	ret = intel_uncore_setup_mmio(gt->uncore, phys_addr);
	if (ret)
		return ret;

	gt->phys_addr = phys_addr;

	return 0;
}

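/*
 * Discover all GTs on the device: set up the primary GT (embedded in struct
 * drm_i915_private) and then walk the platform's extra GT list to set up any
 * additional tile or standalone-media GTs.
 */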
int intel_gt_probe_all(struct drm_i915_private *i915)
{
	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
	struct intel_gt *gt = &i915->gt0;
	const struct intel_gt_definition *gtdef;
	phys_addr_t phys_addr;
	unsigned int mmio_bar;
	unsigned int i;
	int ret;

	mmio_bar = GRAPHICS_VER(i915) == 2 ? GEN2_GTTMMADR_BAR : GTTMMADR_BAR;
	phys_addr = pci_resource_start(pdev, mmio_bar);

	/*
	 * We always have at least one primary GT on any device
	 * and it has been already initialized early during probe
	 * in i915_driver_probe()
	 */
	gt->i915 = i915;
	gt->name = "Primary GT";
	gt->info.engine_mask = RUNTIME_INFO(i915)->platform_engine_mask;

	drm_dbg(&i915->drm, "Setting up %s\n", gt->name);
	ret = intel_gt_tile_setup(gt, phys_addr);
	if (ret)
		return ret;

	i915->gt[0] = gt;

	if (!HAS_EXTRA_GT_LIST(i915))
		return 0;

	for (i = 1, gtdef = &INTEL_INFO(i915)->extra_gt_list[i - 1];
	     gtdef->name != NULL;
	     i++, gtdef = &INTEL_INFO(i915)->extra_gt_list[i - 1]) {
		gt = drmm_kzalloc(&i915->drm, sizeof(*gt), GFP_KERNEL);
		if (!gt) {
			ret = -ENOMEM;
			goto err;
		}

		gt->i915 = i915;
		gt->name = gtdef->name;
		gt->type = gtdef->type;
		gt->info.engine_mask = gtdef->engine_mask;
		gt->info.id = i;

		drm_dbg(&i915->drm, "Setting up %s\n", gt->name);
		if (GEM_WARN_ON(range_overflows_t(resource_size_t,
						  gtdef->mapping_base,
						  SZ_16M,
						  pci_resource_len(pdev, mmio_bar)))) {
			ret = -ENODEV;
			goto err;
		}

		switch (gtdef->type) {
		case GT_TILE:
			ret = intel_gt_tile_setup(gt, phys_addr + gtdef->mapping_base);
			break;

		case GT_MEDIA:
			ret = intel_sa_mediagt_setup(gt, phys_addr + gtdef->mapping_base,
						     gtdef->gsi_offset);
			break;

		case GT_PRIMARY:
			/* Primary GT should not appear in extra GT list */
		default:
			MISSING_CASE(gtdef->type);
			ret = -ENODEV;
		}

		if (ret)
			goto err;

		i915->gt[i] = gt;
	}

	return 0;

err:
	i915_probe_error(i915, "Failed to initialize %s! (%d)\n", gtdef->name, ret);
	intel_gt_release_all(i915);

	return ret;
}

int intel_gt_tiles_init(struct drm_i915_private *i915)
{
	struct intel_gt *gt;
	unsigned int id;
	int ret;

	for_each_gt(gt, i915, id) {
		ret = intel_gt_probe_lmem(gt);
		if (ret)
			return ret;
	}

	return 0;
}

void intel_gt_release_all(struct drm_i915_private *i915)
{
	struct intel_gt *gt;
	unsigned int id;

	for_each_gt(gt, i915, id)
		i915->gt[id] = NULL;
}

void intel_gt_info_print(const struct intel_gt_info *info,
			 struct drm_printer *p)
{
	drm_printf(p, "available engines: %x\n", info->engine_mask);

	intel_sseu_dump(&info->sseu, p);
}

struct reg_and_bit {
	i915_reg_t reg;
	u32 bit;
};

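/*
 * Look up the TLB invalidation register and request bit for an engine, based
 * on its class and instance. Returns an empty reg_and_bit if the class has no
 * invalidation register in the supplied table.
 */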
static struct reg_and_bit
get_reg_and_bit(const struct intel_engine_cs *engine, const bool gen8,
		const i915_reg_t *regs, const unsigned int num)
{
	const unsigned int class = engine->class;
	struct reg_and_bit rb = { };

	if (drm_WARN_ON_ONCE(&engine->i915->drm,
			     class >= num || !regs[class].reg))
		return rb;

	rb.reg = regs[class];
	if (gen8 && class == VIDEO_DECODE_CLASS)
		rb.reg.reg += 4 * engine->instance; /* GEN8_M2TCR */
	else
		rb.bit = engine->instance;

	rb.bit = BIT(rb.bit);

	return rb;
}

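/*
 * Issue a full TLB invalidation via MMIO: write the per-engine-class
 * invalidation registers for every awake engine (under the uncore lock so we
 * do not race a GT reset), then poll each register until the hardware
 * reports the invalidation as complete.
 */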
static void mmio_invalidate_full(struct intel_gt *gt)
{
	static const i915_reg_t gen8_regs[] = {
		[RENDER_CLASS]			= GEN8_RTCR,
		[VIDEO_DECODE_CLASS]		= GEN8_M1TCR, /* , GEN8_M2TCR */
		[VIDEO_ENHANCEMENT_CLASS]	= GEN8_VTCR,
		[COPY_ENGINE_CLASS]		= GEN8_BTCR,
	};
	static const i915_reg_t gen12_regs[] = {
		[RENDER_CLASS]			= GEN12_GFX_TLB_INV_CR,
		[VIDEO_DECODE_CLASS]		= GEN12_VD_TLB_INV_CR,
		[VIDEO_ENHANCEMENT_CLASS]	= GEN12_VE_TLB_INV_CR,
		[COPY_ENGINE_CLASS]		= GEN12_BLT_TLB_INV_CR,
		[COMPUTE_CLASS]			= GEN12_COMPCTX_TLB_INV_CR,
	};
	struct drm_i915_private *i915 = gt->i915;
	struct intel_uncore *uncore = gt->uncore;
	struct intel_engine_cs *engine;
	intel_engine_mask_t awake, tmp;
	enum intel_engine_id id;
	const i915_reg_t *regs;
	unsigned int num = 0;

	if (GRAPHICS_VER(i915) == 12) {
		regs = gen12_regs;
		num = ARRAY_SIZE(gen12_regs);
	} else if (GRAPHICS_VER(i915) >= 8 && GRAPHICS_VER(i915) <= 11) {
		regs = gen8_regs;
		num = ARRAY_SIZE(gen8_regs);
	} else if (GRAPHICS_VER(i915) < 8) {
		return;
	}

	if (drm_WARN_ONCE(&i915->drm, !num,
			  "Platform does not implement TLB invalidation!"))
		return;

	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);

	spin_lock_irq(&uncore->lock); /* serialise invalidate with GT reset */

	awake = 0;
	for_each_engine(engine, gt, id) {
		struct reg_and_bit rb;

		if (!intel_engine_pm_is_awake(engine))
			continue;

		rb = get_reg_and_bit(engine, regs == gen8_regs, regs, num);
		if (!i915_mmio_reg_offset(rb.reg))
			continue;

		if (GRAPHICS_VER(i915) == 12 && (engine->class == VIDEO_DECODE_CLASS ||
		    engine->class == VIDEO_ENHANCEMENT_CLASS ||
		    engine->class == COMPUTE_CLASS))
			rb.bit = _MASKED_BIT_ENABLE(rb.bit);

		intel_uncore_write_fw(uncore, rb.reg, rb.bit);
		awake |= engine->mask;
	}

	GT_TRACE(gt, "invalidated engines %08x\n", awake);

	/* Wa_2207587034:tgl,dg1,rkl,adl-s,adl-p */
	if (awake &&
	    (IS_TIGERLAKE(i915) ||
	     IS_DG1(i915) ||
	     IS_ROCKETLAKE(i915) ||
	     IS_ALDERLAKE_S(i915) ||
	     IS_ALDERLAKE_P(i915)))
		intel_uncore_write_fw(uncore, GEN12_OA_TLB_INV_CR, 1);

	spin_unlock_irq(&uncore->lock);

	for_each_engine_masked(engine, gt, awake, tmp) {
		struct reg_and_bit rb;

		/*
		 * HW architecture suggests typical invalidation time at 40us,
		 * with pessimistic cases up to 100us and a recommendation to
		 * cap at 1ms. We go a bit higher just in case.
		 */
		const unsigned int timeout_us = 100;
		const unsigned int timeout_ms = 4;

		rb = get_reg_and_bit(engine, regs == gen8_regs, regs, num);
		if (__intel_wait_for_register_fw(uncore,
						 rb.reg, rb.bit, 0,
						 timeout_us, timeout_ms,
						 NULL))
			drm_err_ratelimited(&gt->i915->drm,
					    "%s TLB invalidation did not complete in %ums!\n",
					    engine->name, timeout_ms);
	}

	/*
	 * Use delayed put since a) we mostly expect a flurry of TLB
	 * invalidations so it is good to avoid paying the forcewake cost and
	 * b) it works around a bug in Icelake which cannot cope with too rapid
	 * transitions.
	 */
	intel_uncore_forcewake_put_delayed(uncore, FORCEWAKE_ALL);
}

static bool tlb_seqno_passed(const struct intel_gt *gt, u32 seqno)
{
	u32 cur = intel_gt_tlb_seqno(gt);

	/* Only skip if a *full* TLB invalidate barrier has passed */
	return (s32)(cur - ALIGN(seqno, 2)) > 0;
}

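/*
 * Invalidate the GT TLBs for the given invalidation seqno. The invalidation
 * is skipped if the GT is wedged, asleep, or if a full invalidation barrier
 * has already passed the requested seqno; otherwise a full MMIO invalidation
 * is performed under the tlb.invalidate_lock and the seqcount is advanced.
 */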
void intel_gt_invalidate_tlb(struct intel_gt *gt, u32 seqno)
{
	intel_wakeref_t wakeref;

	if (I915_SELFTEST_ONLY(gt->awake == -ENODEV))
		return;

	if (intel_gt_is_wedged(gt))
		return;

	if (tlb_seqno_passed(gt, seqno))
		return;

	with_intel_gt_pm_if_awake(gt, wakeref) {
		mutex_lock(&gt->tlb.invalidate_lock);
		if (tlb_seqno_passed(gt, seqno))
			goto unlock;

		mmio_invalidate_full(gt);

		write_seqcount_invalidate(&gt->tlb.seqno);
unlock:
		mutex_unlock(&gt->tlb.invalidate_lock);
	}
}