/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2019 Intel Corporation
 */

#ifndef __INTEL_CONTEXT_TYPES__
#define __INTEL_CONTEXT_TYPES__

#include <linux/average.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/types.h>

#include "i915_active_types.h"
#include "i915_sw_fence.h"
#include "i915_utils.h"
#include "intel_engine_types.h"
#include "intel_sseu.h"

#include "uc/intel_guc_fwif.h"

#define CONTEXT_REDZONE POISON_INUSE
DECLARE_EWMA(runtime, 3, 8);

struct i915_gem_context;
struct i915_gem_ww_ctx;
struct i915_vma;
struct intel_breadcrumbs;
struct intel_context;
struct intel_ring;

struct intel_context_ops {
	unsigned long flags;
#define COPS_HAS_INFLIGHT_BIT 0
#define COPS_HAS_INFLIGHT BIT(COPS_HAS_INFLIGHT_BIT)

#define COPS_RUNTIME_CYCLES_BIT 1
#define COPS_RUNTIME_CYCLES BIT(COPS_RUNTIME_CYCLES_BIT)

	int (*alloc)(struct intel_context *ce);

	void (*ban)(struct intel_context *ce, struct i915_request *rq);

	int (*pre_pin)(struct intel_context *ce, struct i915_gem_ww_ctx *ww, void **vaddr);
	int (*pin)(struct intel_context *ce, void *vaddr);
	void (*unpin)(struct intel_context *ce);
	void (*post_unpin)(struct intel_context *ce);

	void (*cancel_request)(struct intel_context *ce,
			       struct i915_request *rq);

	void (*enter)(struct intel_context *ce);
	void (*exit)(struct intel_context *ce);

	void (*sched_disable)(struct intel_context *ce);

	void (*reset)(struct intel_context *ce);
	void (*destroy)(struct kref *kref);

	/* virtual/parallel engine/context interface */
	struct intel_context *(*create_virtual)(struct intel_engine_cs **engine,
						unsigned int count,
						unsigned long flags);
	struct intel_context *(*create_parallel)(struct intel_engine_cs **engines,
						 unsigned int num_siblings,
						 unsigned int width);
	struct intel_engine_cs *(*get_sibling)(struct intel_engine_cs *engine,
					       unsigned int sibling);
};

struct intel_context {
	/*
	 * Note: Some fields may be accessed under RCU.
	 *
	 * Unless otherwise noted a field can safely be assumed to be protected
	 * by strong reference counting.
	 */
	union {
		struct kref ref; /* no kref_get_unless_zero()! */
		struct rcu_head rcu;
	};

	struct intel_engine_cs *engine;
	struct intel_engine_cs *inflight;
#define __intel_context_inflight(engine) ptr_mask_bits(engine, 3)
#define __intel_context_inflight_count(engine) ptr_unmask_bits(engine, 3)
#define intel_context_inflight(ce) \
	__intel_context_inflight(READ_ONCE((ce)->inflight))
#define intel_context_inflight_count(ce) \
	__intel_context_inflight_count(READ_ONCE((ce)->inflight))
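
	/*
	 * Illustrative sketch (assumed usage, not taken from this file):
	 * @inflight packs a small submission count into the low three bits
	 * of the engine pointer (see ptr_mask_bits()/ptr_unmask_bits() in
	 * i915_utils.h), and the helpers above separate the two again:
	 *
	 *	struct intel_engine_cs *engine = intel_context_inflight(ce);
	 *
	 *	if (engine)
	 *		pr_debug("%s: %lu submissions in flight\n",
	 *			 engine->name,
	 *			 intel_context_inflight_count(ce));
	 */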

	struct i915_address_space *vm;
	struct i915_gem_context __rcu *gem_context;

	/*
	 * @signal_lock protects the list of requests that need signaling,
	 * @signals. While there are any requests that need signaling,
	 * we add the context to the breadcrumbs worker, and remove it
	 * upon completion/cancellation of the last request.
	 */
	struct list_head signal_link; /* Accessed under RCU */
	struct list_head signals; /* Guarded by signal_lock */
	spinlock_t signal_lock; /* protects signals, the list of requests */

	struct i915_vma *state;
	u32 ring_size;
	struct intel_ring *ring;
	struct intel_timeline *timeline;

	unsigned long flags;
#define CONTEXT_BARRIER_BIT		0
#define CONTEXT_ALLOC_BIT		1
#define CONTEXT_INIT_BIT		2
#define CONTEXT_VALID_BIT		3
#define CONTEXT_CLOSED_BIT		4
#define CONTEXT_USE_SEMAPHORES		5
#define CONTEXT_BANNED			6
#define CONTEXT_FORCE_SINGLE_SUBMISSION	7
#define CONTEXT_NOPREEMPT		8
#define CONTEXT_LRCA_DIRTY		9
#define CONTEXT_GUC_INIT		10
#define CONTEXT_PERMA_PIN		11
#define CONTEXT_IS_PARKING		12

	struct {
		u64 timeout_us;
	} watchdog;

	u32 *lrc_reg_state;
	union {
		struct {
			u32 lrca;
			u32 ccid;
		};
		u64 desc;
	} lrc;
	u32 tag; /* cookie passed to HW to track this context on submission */

	/** stats: Context GPU engine busyness tracking. */
	struct intel_context_stats {
		u64 active;

		/* Time on GPU as tracked by the hw. */
		struct {
			struct ewma_runtime avg;
			u64 total;
			u32 last;
			I915_SELFTEST_DECLARE(u32 num_underflow);
			I915_SELFTEST_DECLARE(u32 max_underflow);
		} runtime;
	} stats;

	unsigned int active_count; /* protected by timeline->mutex */

	atomic_t pin_count;
	struct mutex pin_mutex; /* guards pinning and associated on-gpuing */
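
	/*
	 * A simplified sketch of the pinning protocol guarded by @pin_count
	 * and @pin_mutex (illustrative only; the real entry points are
	 * intel_context_pin() and __intel_context_do_pin(), which also
	 * handle ww locking and error unwind). Only the first pin takes the
	 * mutex and invokes the backend hooks; later pins are a lockless
	 * increment:
	 *
	 *	int err = 0;
	 *
	 *	if (!atomic_inc_not_zero(&ce->pin_count)) {
	 *		mutex_lock(&ce->pin_mutex);
	 *		if (!atomic_read(&ce->pin_count)) {
	 *			err = ce->ops->pre_pin(ce, ww, &vaddr);
	 *			if (!err)
	 *				err = ce->ops->pin(ce, vaddr);
	 *		}
	 *		if (!err)
	 *			atomic_inc(&ce->pin_count);
	 *		mutex_unlock(&ce->pin_mutex);
	 *	}
	 */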

	/**
	 * active: Active tracker for the rq activity (inc. external) on this
	 * intel_context object.
	 */
	struct i915_active active;

	const struct intel_context_ops *ops;

	/** sseu: Control eu/slice partitioning */
	struct intel_sseu sseu;

	/**
	 * pinned_contexts_link: List link for the engine's pinned contexts.
	 * This is only used if this is a perma-pinned kernel context and
	 * the list is assumed to only be manipulated during driver load
	 * or unload time, so there is no mutex protection currently.
	 */
	struct list_head pinned_contexts_link;

	u8 wa_bb_page; /* if set, page num reserved for context workarounds */

	struct {
		/** @lock: protects everything in guc_state */
		spinlock_t lock;
		/**
		 * @sched_state: scheduling state of this context using GuC
		 * submission
		 */
		u32 sched_state;
		/**
		 * @fences: maintains a list of requests that are currently
		 * being fenced until a GuC operation completes
		 */
		struct list_head fences;
		/**
		 * @blocked: fence used to signal when the blocking of a
		 * context's submissions is complete.
		 */
		struct i915_sw_fence blocked;
		/** @number_committed_requests: number of committed requests */
		int number_committed_requests;
		/** @requests: list of active requests on this context */
		struct list_head requests;
		/** @prio: the context's current GuC priority */
		u8 prio;
		/**
		 * @prio_count: a counter of the number of requests in flight
		 * in each priority bucket
		 */
		u32 prio_count[GUC_CLIENT_PRIORITY_NUM];
	} guc_state;

	struct {
		/**
		 * @id: handle which is used to uniquely identify this context
		 * with the GuC, protected by guc->submission_state.lock
		 */
		u16 id;
		/**
		 * @ref: the number of references to the guc_id, when
		 * transitioning in and out of zero protected by
		 * guc->submission_state.lock
		 */
		atomic_t ref;
		/**
		 * @link: in guc->guc_id_list when the guc_id has no refs but is
		 * still valid, protected by guc->submission_state.lock
		 */
		struct list_head link;
	} guc_id;

	/**
	 * @destroyed_link: link in guc->submission_state.destroyed_contexts, in
	 * list when context is pending to be destroyed (deregistered with the
	 * GuC), protected by guc->submission_state.lock
	 */
	struct list_head destroyed_link;

	/** @parallel: sub-structure for parallel submission members */
	struct {
		union {
			/**
			 * @child_list: parent's list of children
			 * contexts, no protection as immutable after context
			 * creation
			 */
			struct list_head child_list;
			/**
			 * @child_link: child's link into parent's list of
			 * children
			 */
			struct list_head child_link;
		};
		/** @parent: pointer to parent if child */
		struct intel_context *parent;
		/**
		 * @last_rq: last request submitted on a parallel context, used
		 * to insert submit fences between requests in the parallel
		 * context
		 */
		struct i915_request *last_rq;
		/**
		 * @fence_context: fence context of the composite fence used
		 * for parallel submission
		 */
		u64 fence_context;
		/**
		 * @seqno: seqno for composite fence when doing parallel
		 * submission
		 */
		u32 seqno;
		/** @number_children: number of children if parent */
		u8 number_children;
		/** @child_index: index into child_list if child */
		u8 child_index;
		/** @guc: GuC specific members for parallel submission */
		struct {
			/** @wqi_head: cached head pointer in work queue */
			u16 wqi_head;
			/** @wqi_tail: cached tail pointer in work queue */
			u16 wqi_tail;
			/** @wq_head: pointer to the actual head in work queue */
			u32 *wq_head;
			/** @wq_tail: pointer to the actual tail in work queue */
			u32 *wq_tail;
			/** @wq_status: pointer to the status in work queue */
			u32 *wq_status;

			/**
			 * @parent_page: page in context state (ce->state) used
			 * by parent for the work queue and process descriptor
			 */
			u8 parent_page;
		} guc;
	} parallel;

#ifdef CONFIG_DRM_I915_SELFTEST
	/**
	 * @drop_schedule_enable: Force drop of schedule enable G2H for selftest
	 */
	bool drop_schedule_enable;

	/**
	 * @drop_schedule_disable: Force drop of schedule disable G2H for
	 * selftest
	 */
	bool drop_schedule_disable;

	/**
	 * @drop_deregister: Force drop of deregister G2H for selftest
	 */
	bool drop_deregister;
#endif
};

#endif /* __INTEL_CONTEXT_TYPES__ */
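
/*
 * Usage note (illustrative; the actual helpers live in intel_context.h):
 * because @ref shares storage with @rcu in the union above, the refcount
 * is dead once it reaches zero and the memory is handed to the RCU free
 * callback, so kref_get_unless_zero() must not be used to resurrect a
 * context found under rcu_read_lock() - hence the warning on @ref.
 * Callers hold strong references instead:
 *
 *	intel_context_get(ce);		// kref_get(&ce->ref)
 *	... submit work ...
 *	intel_context_put(ce);		// kref_put(&ce->ref, ce->ops->destroy)
 */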