/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2014-2019 Intel Corporation
 */

#ifndef _INTEL_GUC_H_
#define _INTEL_GUC_H_

#include <linux/delay.h>
#include <linux/iosys-map.h>
#include <linux/xarray.h>

#include "intel_guc_ct.h"
#include "intel_guc_fw.h"
#include "intel_guc_fwif.h"
#include "intel_guc_log.h"
#include "intel_guc_reg.h"
#include "intel_guc_slpc_types.h"
#include "intel_uc_fw.h"
#include "intel_uncore.h"
#include "i915_utils.h"
#include "i915_vma.h"

struct __guc_ads_blob;
struct intel_guc_state_capture;
/**
 * struct intel_guc - Top level structure of GuC.
 *
 * It handles firmware loading and manages the client pool. intel_guc owns an
 * i915_sched_engine for submission.
 */
struct intel_guc {
	/** @fw: the GuC firmware */
	struct intel_uc_fw fw;
	/** @log: sub-structure containing GuC log related data and objects */
	struct intel_guc_log log;
	/** @ct: the command transport communication channel */
	struct intel_guc_ct ct;
	/** @slpc: sub-structure containing SLPC related data and objects */
	struct intel_guc_slpc slpc;
	/** @capture: the error-state-capture module's data and objects */
	struct intel_guc_state_capture *capture;

	/** @sched_engine: Global engine used to submit requests to GuC */
	struct i915_sched_engine *sched_engine;
	/**
	 * @stalled_request: if GuC can't process a request for any reason, we
	 * save it until GuC restarts processing. No other request can be
	 * submitted until the stalled request is processed.
	 */
	struct i915_request *stalled_request;
	/**
	 * @submission_stall_reason: reason why submission is stalled
	 */
	enum {
		STALL_NONE,
		STALL_REGISTER_CONTEXT,
		STALL_MOVE_LRC_TAIL,
		STALL_ADD_REQUEST,
	} submission_stall_reason;

	/* intel_guc_recv interrupt related state */
	/** @irq_lock: protects GuC irq state */
	spinlock_t irq_lock;
	/**
	 * @msg_enabled_mask: mask of events that are processed when receiving
	 * an INTEL_GUC_ACTION_DEFAULT G2H message.
	 */
	unsigned int msg_enabled_mask;

	/**
	 * @outstanding_submission_g2h: number of outstanding GuC to Host
	 * responses related to GuC submission, used to determine if the GT is
	 * idle
	 */
	atomic_t outstanding_submission_g2h;

	/** @interrupts: pointers to GuC interrupt-managing functions. */
	struct {
		void (*reset)(struct intel_guc *guc);
		void (*enable)(struct intel_guc *guc);
		void (*disable)(struct intel_guc *guc);
	} interrupts;

	/**
	 * @submission_state: sub-structure for submission state protected by
	 * single lock
	 */
	struct {
		/**
		 * @lock: protects everything in submission_state,
		 * ce->guc_id.id, and ce->guc_id.ref when transitioning in and
		 * out of zero
		 */
		spinlock_t lock;
		/**
		 * @guc_ids: used to allocate new guc_ids, single-lrc
		 */
		struct ida guc_ids;
		/**
		 * @num_guc_ids: number of guc_ids; a selftest feature
		 * allowing this number to be reduced while testing.
		 */
		int num_guc_ids;
		/**
		 * @guc_ids_bitmap: used to allocate new guc_ids, multi-lrc
		 */
		unsigned long *guc_ids_bitmap;
		/**
		 * @guc_id_list: list of intel_context with valid guc_ids but no
		 * refs
		 */
		struct list_head guc_id_list;
		/**
		 * @destroyed_contexts: list of contexts waiting to be destroyed
		 * (deregistered with the GuC)
		 */
		struct list_head destroyed_contexts;
		/**
		 * @destroyed_worker: worker to deregister contexts; needed
		 * because deregistration must take a GT PM reference, which
		 * can't be done from the destroy function as it might run in
		 * an atomic context (no sleeping)
		 */
		struct work_struct destroyed_worker;
		/**
		 * @reset_fail_worker: worker to trigger a GT reset after an
		 * engine reset fails
		 */
		struct work_struct reset_fail_worker;
		/**
		 * @reset_fail_mask: mask of engines that failed to reset
		 */
		intel_engine_mask_t reset_fail_mask;
	} submission_state;

	/**
	 * @submission_supported: tracks whether we support GuC submission on
	 * the current platform
	 */
	bool submission_supported;
	/** @submission_selected: tracks whether the user enabled GuC submission */
	bool submission_selected;
	/** @submission_initialized: tracks whether GuC submission has been initialized */
	bool submission_initialized;
	/**
	 * @rc_supported: tracks whether we support GuC rc on the current
	 * platform
	 */
	bool rc_supported;
	/** @rc_selected: tracks whether the user enabled GuC rc */
	bool rc_selected;

	/** @ads_vma: object allocated to hold the GuC ADS */
	struct i915_vma *ads_vma;
	/** @ads_map: contents of the GuC ADS */
	struct iosys_map ads_map;
	/** @ads_regset_size: size of the save/restore regsets in the ADS */
	u32 ads_regset_size;
	/**
	 * @ads_regset_count: number of save/restore registers in the ADS for
	 * each engine
	 */
	u32 ads_regset_count[I915_NUM_ENGINES];
	/** @ads_regset: save/restore regsets in the ADS */
	struct guc_mmio_reg *ads_regset;
	/** @ads_golden_ctxt_size: size of the golden contexts in the ADS */
	u32 ads_golden_ctxt_size;
	/** @ads_capture_size: size of register lists in the ADS used for error capture */
	u32 ads_capture_size;
	/** @ads_engine_usage_size: size of engine usage in the ADS */
	u32 ads_engine_usage_size;

	/** @lrc_desc_pool_v69: object allocated to hold the GuC LRC descriptor pool */
	struct i915_vma *lrc_desc_pool_v69;
	/** @lrc_desc_pool_vaddr_v69: contents of the GuC LRC descriptor pool */
	void *lrc_desc_pool_vaddr_v69;

	/**
	 * @context_lookup: used to resolve intel_context from guc_id, if a
	 * context is present in this structure it is registered with the GuC
	 */
	struct xarray context_lookup;

	/** @params: Control params for fw initialization */
	u32 params[GUC_CTL_MAX_DWORDS];

	/** @send_regs: GuC's FW specific registers used for sending MMIO H2G */
	struct {
		u32 base;
		unsigned int count;
		enum forcewake_domains fw_domains;
	} send_regs;

	/** @notify_reg: register used to send interrupts to the GuC FW */
	i915_reg_t notify_reg;

	/**
	 * @mmio_msg: notification bitmask that the GuC writes in one of its
	 * registers when the CT channel is disabled, to be processed when the
	 * channel is back up.
	 */
	u32 mmio_msg;

	/** @send_mutex: used to serialize the intel_guc_send actions */
	struct mutex send_mutex;

	/**
	 * @timestamp: GT timestamp object that stores a copy of the timestamp
	 * and adjusts it for overflow using a worker.
	 */
	struct {
		/**
		 * @lock: Lock protecting the below fields and the engine stats.
		 */
		spinlock_t lock;

		/**
		 * @gt_stamp: 64 bit extended value of the GT timestamp.
		 */
		u64 gt_stamp;

		/**
		 * @ping_delay: Period for polling the GT timestamp for
		 * overflow.
		 */
		unsigned long ping_delay;

		/**
		 * @work: Periodic work to adjust GT timestamp, engine and
		 * context usage for overflows.
		 */
		struct delayed_work work;

		/**
		 * @shift: Right shift value for the gpm timestamp
		 */
		u32 shift;

		/**
		 * @last_stat_jiffies: jiffies at last actual stats collection
		 * time. We use this timestamp to ensure we don't oversample
		 * the stats because runtime power management events can
		 * trigger stats collection at much higher rates than required.
		 */
		unsigned long last_stat_jiffies;
	} timestamp;

#ifdef CONFIG_DRM_I915_SELFTEST
	/**
	 * @number_guc_id_stolen: The number of guc_ids that have been stolen
	 */
	int number_guc_id_stolen;
#endif
};

static inline struct intel_guc *log_to_guc(struct intel_guc_log *log)
{
	return container_of(log, struct intel_guc, log);
}

static
inline int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len)
{
	return intel_guc_ct_send(&guc->ct, action, len, NULL, 0, 0);
}

static
inline int intel_guc_send_nb(struct intel_guc *guc, const u32 *action, u32 len,
			     u32 g2h_len_dw)
{
	return intel_guc_ct_send(&guc->ct, action, len, NULL, 0,
				 MAKE_SEND_FLAGS(g2h_len_dw));
}

static inline int
intel_guc_send_and_receive(struct intel_guc *guc, const u32 *action, u32 len,
			   u32 *response_buf, u32 response_buf_size)
{
	return intel_guc_ct_send(&guc->ct, action, len,
				 response_buf, response_buf_size, 0);
}
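
/*
 * Usage sketch (illustrative, not taken from this file): H2G actions are
 * arrays of dwords with the action code first and any parameters after.
 * The action code and parameter below are hypothetical placeholders:
 *
 *	u32 action[] = { some_guc_action_code, some_param };
 *	int err = intel_guc_send(guc, action, ARRAY_SIZE(action));
 */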

static inline int intel_guc_send_busy_loop(struct intel_guc *guc,
					   const u32 *action,
					   u32 len,
					   u32 g2h_len_dw,
					   bool loop)
{
	int err;
	unsigned int sleep_period_ms = 1;
	bool not_atomic = !in_atomic() && !irqs_disabled();

	/*
	 * FIXME: Have caller pass in if we are in an atomic context to avoid
	 * using in_atomic(). It is likely safe here as we check for irqs
	 * disabled which basically all the spin locks in the i915 do but
	 * regardless this should be cleaned up.
	 */

	/* No sleeping with spin locks, just busy loop */
	might_sleep_if(loop && not_atomic);

retry:
	err = intel_guc_send_nb(guc, action, len, g2h_len_dw);
	if (unlikely(err == -EBUSY && loop)) {
		if (likely(not_atomic)) {
			if (msleep_interruptible(sleep_period_ms))
				return -EINTR;
			sleep_period_ms = sleep_period_ms << 1;
		} else {
			cpu_relax();
		}
		goto retry;
	}

	return err;
}
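
/*
 * Usage sketch (hypothetical values): send an H2G expected to be answered
 * by a one-dword G2H, sleeping with exponential backoff (1 ms, 2 ms, 4 ms,
 * ...) while the CT channel reports -EBUSY:
 *
 *	err = intel_guc_send_busy_loop(guc, action, ARRAY_SIZE(action),
 *				       1, true);
 */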

static inline void intel_guc_to_host_event_handler(struct intel_guc *guc)
{
	intel_guc_ct_event_handler(&guc->ct);
}

/* GuC addresses above GUC_GGTT_TOP also don't map through the GTT */
#define GUC_GGTT_TOP 0xFEE00000

/**
 * intel_guc_ggtt_offset() - Get and validate the GGTT offset of @vma
 * @guc: intel_guc structure.
 * @vma: i915 graphics virtual memory area.
 *
 * GuC does not allow any gfx GGTT address that falls into range
 * [0, ggtt.pin_bias), which is reserved for Boot ROM, SRAM and WOPCM.
 * Currently, in order to exclude [0, ggtt.pin_bias) address space from
 * GGTT, all gfx objects used by GuC are allocated with intel_guc_allocate_vma()
 * and pinned with PIN_OFFSET_BIAS along with the value of ggtt.pin_bias.
 *
 * Return: GGTT offset of the @vma.
 */
static inline u32 intel_guc_ggtt_offset(struct intel_guc *guc,
					struct i915_vma *vma)
{
	u32 offset = i915_ggtt_offset(vma);

	GEM_BUG_ON(offset < i915_ggtt_pin_bias(vma));
	GEM_BUG_ON(range_overflows_t(u64, offset, vma->size, GUC_GGTT_TOP));

	return offset;
}
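
/*
 * Usage sketch: buffers shared with the GuC come from
 * intel_guc_allocate_vma() (declared below), which pins above
 * ggtt.pin_bias, so both GEM_BUG_ONs above hold. The size here is an
 * arbitrary example:
 *
 *	struct i915_vma *vma = intel_guc_allocate_vma(guc, SZ_4K);
 *
 *	if (!IS_ERR(vma))
 *		offset = intel_guc_ggtt_offset(guc, vma);
 */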

void intel_guc_init_early(struct intel_guc *guc);
void intel_guc_init_late(struct intel_guc *guc);
void intel_guc_init_send_regs(struct intel_guc *guc);
void intel_guc_write_params(struct intel_guc *guc);
int intel_guc_init(struct intel_guc *guc);
void intel_guc_fini(struct intel_guc *guc);
void intel_guc_notify(struct intel_guc *guc);
int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
			u32 *response_buf, u32 response_buf_size);
int intel_guc_to_host_process_recv_msg(struct intel_guc *guc,
				       const u32 *payload, u32 len);
int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset);
int intel_guc_suspend(struct intel_guc *guc);
int intel_guc_resume(struct intel_guc *guc);
struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size);
int intel_guc_allocate_and_map_vma(struct intel_guc *guc, u32 size,
				   struct i915_vma **out_vma, void **out_vaddr);
int intel_guc_self_cfg32(struct intel_guc *guc, u16 key, u32 value);
int intel_guc_self_cfg64(struct intel_guc *guc, u16 key, u64 value);

static inline bool intel_guc_is_supported(struct intel_guc *guc)
{
	return intel_uc_fw_is_supported(&guc->fw);
}

static inline bool intel_guc_is_wanted(struct intel_guc *guc)
{
	return intel_uc_fw_is_enabled(&guc->fw);
}

static inline bool intel_guc_is_used(struct intel_guc *guc)
{
	GEM_BUG_ON(__intel_uc_fw_status(&guc->fw) == INTEL_UC_FIRMWARE_SELECTED);
	return intel_uc_fw_is_available(&guc->fw);
}

static inline bool intel_guc_is_fw_running(struct intel_guc *guc)
{
	return intel_uc_fw_is_running(&guc->fw);
}

static inline bool intel_guc_is_ready(struct intel_guc *guc)
{
	return intel_guc_is_fw_running(guc) && intel_guc_ct_enabled(&guc->ct);
}
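
/*
 * Gating sketch: callers that need to talk to the GuC typically bail out
 * unless the firmware is running and the CT channel is up. A minimal,
 * illustrative check (the error code is an arbitrary choice):
 *
 *	if (!intel_guc_is_ready(guc))
 *		return -ENODEV;
 */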

static inline void intel_guc_reset_interrupts(struct intel_guc *guc)
{
	guc->interrupts.reset(guc);
}

static inline void intel_guc_enable_interrupts(struct intel_guc *guc)
{
	guc->interrupts.enable(guc);
}

static inline void intel_guc_disable_interrupts(struct intel_guc *guc)
{
	guc->interrupts.disable(guc);
}

static inline int intel_guc_sanitize(struct intel_guc *guc)
{
	intel_uc_fw_sanitize(&guc->fw);
	intel_guc_disable_interrupts(guc);
	intel_guc_ct_sanitize(&guc->ct);
	guc->mmio_msg = 0;

	return 0;
}

static inline void intel_guc_enable_msg(struct intel_guc *guc, u32 mask)
{
	spin_lock_irq(&guc->irq_lock);
	guc->msg_enabled_mask |= mask;
	spin_unlock_irq(&guc->irq_lock);
}

static inline void intel_guc_disable_msg(struct intel_guc *guc, u32 mask)
{
	spin_lock_irq(&guc->irq_lock);
	guc->msg_enabled_mask &= ~mask;
	spin_unlock_irq(&guc->irq_lock);
}
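
/*
 * Usage sketch: @mask selects which event bits of an
 * INTEL_GUC_ACTION_DEFAULT G2H message get processed (see
 * @msg_enabled_mask above). The mask value below is a hypothetical
 * placeholder, not a real event define:
 *
 *	intel_guc_enable_msg(guc, some_guc_recv_msg_bit);
 */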

int intel_guc_wait_for_idle(struct intel_guc *guc, long timeout);

int intel_guc_deregister_done_process_msg(struct intel_guc *guc,
					  const u32 *msg, u32 len);
int intel_guc_sched_done_process_msg(struct intel_guc *guc,
				     const u32 *msg, u32 len);
int intel_guc_context_reset_process_msg(struct intel_guc *guc,
					const u32 *msg, u32 len);
int intel_guc_engine_failure_process_msg(struct intel_guc *guc,
					 const u32 *msg, u32 len);
int intel_guc_error_capture_process_msg(struct intel_guc *guc,
					const u32 *msg, u32 len);

struct intel_engine_cs *
intel_guc_lookup_engine(struct intel_guc *guc, u8 guc_class, u8 instance);

void intel_guc_find_hung_context(struct intel_engine_cs *engine);

int intel_guc_global_policies_update(struct intel_guc *guc);

void intel_guc_context_ban(struct intel_context *ce, struct i915_request *rq);

void intel_guc_submission_reset_prepare(struct intel_guc *guc);
void intel_guc_submission_reset(struct intel_guc *guc, intel_engine_mask_t stalled);
void intel_guc_submission_reset_finish(struct intel_guc *guc);
void intel_guc_submission_cancel_requests(struct intel_guc *guc);

void intel_guc_load_status(struct intel_guc *guc, struct drm_printer *p);

void intel_guc_write_barrier(struct intel_guc *guc);

void intel_guc_dump_time_info(struct intel_guc *guc, struct drm_printer *p);

#endif