/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2019 Intel Corporation
 */

#ifndef __INTEL_GT_TYPES__
#define __INTEL_GT_TYPES__

#include <linux/ktime.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/seqlock.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#include "uc/intel_uc.h"
#include "intel_gsc.h"

#include "i915_vma.h"
#include "intel_engine_types.h"
#include "intel_gt_buffer_pool_types.h"
#include "intel_hwconfig.h"
#include "intel_llc_types.h"
#include "intel_reset_types.h"
#include "intel_rc6_types.h"
#include "intel_rps_types.h"
#include "intel_migrate_types.h"
#include "intel_wakeref.h"
#include "pxp/intel_pxp_types.h"

struct drm_i915_private;
struct i915_ggtt;
struct intel_engine_cs;
struct intel_uncore;

struct intel_mmio_range {
	u32 start;
	u32 end;
};

/*
 * The hardware has multiple kinds of multicast register ranges that need
 * special register steering (and future platforms are expected to add
 * additional types).
 *
 * During driver startup, we initialize the steering control register to
 * direct reads to a slice/subslice that is valid for the 'subslice' class
 * of multicast registers. If another type of steering does not have any
 * overlap in valid steering targets with 'subslice' style registers, we
 * will need to explicitly re-steer reads of registers of the other type.
 *
 * Only the replication types that may need additional non-default steering
 * are listed here.
 */
enum intel_steering_type {
	L3BANK,
	MSLICE,
	LNCF,

	NUM_STEERING_TYPES
};

enum intel_submission_method {
	INTEL_SUBMISSION_RING,
	INTEL_SUBMISSION_ELSP,
	INTEL_SUBMISSION_GUC,
};
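
/*
 * A minimal sketch (hypothetical helper, not part of the upstream header)
 * of how the per-type steering tables stored in struct intel_gt below are
 * meant to be consulted: each table is an array of intel_mmio_range
 * entries terminated by a zeroed entry, and a register offset needs
 * explicit re-steering if it falls within any listed range.
 */
static inline bool
intel_gt_mmio_range_contains(const struct intel_mmio_range *table, u32 offset)
{
	/* A zero 'end' marks the sentinel entry terminating the table. */
	for (; table && table->end; table++) {
		if (offset >= table->start && offset <= table->end)
			return true;
	}

	return false;
}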

struct intel_gt {
	struct drm_i915_private *i915;
	struct intel_uncore *uncore;
	struct i915_ggtt *ggtt;

	struct intel_uc uc;
	struct intel_gsc gsc;

	struct {
		/* Serialize global TLB invalidations */
		struct mutex invalidate_lock;

		/*
		 * Batch TLB invalidations
		 *
		 * After unbinding the PTE, we need to ensure the TLBs
		 * are invalidated prior to releasing the physical pages.
		 * But we only need one such invalidation for all unbinds,
		 * so we track how many TLB invalidations have been
		 * performed since unbinding the PTE and only emit an
		 * extra invalidate if no full barrier has been passed.
		 */
		seqcount_mutex_t seqno;
	} tlb;

	struct i915_wa_list wa_list;

	struct intel_gt_timelines {
		spinlock_t lock; /* protects active_list */
		struct list_head active_list;
	} timelines;

	struct intel_gt_requests {
		/**
		 * We leave the user IRQ off as much as possible,
		 * but this means that requests will finish and never
		 * be retired once the system goes idle. Set a timer to
		 * fire periodically while the ring is running. When it
		 * fires, go retire requests.
		 */
		struct delayed_work retire_work;
	} requests;

	struct {
		struct llist_head list;
		struct work_struct work;
	} watchdog;

	struct intel_wakeref wakeref;
	atomic_t user_wakeref;

	struct list_head closed_vma;
	spinlock_t closed_lock; /* guards the list of closed_vma */

	ktime_t last_init_time;
	struct intel_reset reset;

	/**
	 * Is the GPU currently considered idle, or busy executing
	 * userspace requests? Whilst idle, we allow runtime power
	 * management to power down the hardware and display clocks.
	 * In order to reduce the effect on performance, there
	 * is a slight delay before we do so.
	 */
	intel_wakeref_t awake;

	u32 clock_frequency;
	u32 clock_period_ns;

	struct intel_llc llc;
	struct intel_rc6 rc6;
	struct intel_rps rps;

	spinlock_t irq_lock;
	u32 gt_imr;
	u32 pm_ier;
	u32 pm_imr;

	u32 pm_guc_events;

	struct {
		bool active;

		/**
		 * @lock: Lock protecting the fields below.
		 */
		seqcount_mutex_t lock;

		/**
		 * @total: Total time this engine was busy.
		 *
		 * Accumulated time not counting the most recent block in
		 * cases where the engine is currently busy (active > 0).
		 */
		ktime_t total;

		/**
		 * @start: Timestamp of the last idle to active transition.
		 *
		 * Idle is defined as active == 0, active as active > 0.
		 */
		ktime_t start;
	} stats;

	struct intel_engine_cs *engine[I915_NUM_ENGINES];
	struct intel_engine_cs *engine_class[MAX_ENGINE_CLASS + 1]
					    [MAX_ENGINE_INSTANCE + 1];
	enum intel_submission_method submission_method;

	/*
	 * Default address space (either GGTT or ppGTT depending on arch).
	 *
	 * Reserved for exclusive use by the kernel.
	 */
	struct i915_address_space *vm;

	/*
	 * A pool of objects to use as shadow copies of client batch buffers
	 * when the command parser is enabled. Prevents the client from
	 * modifying the batch contents after software parsing.
	 *
	 * Buffers older than 1s are periodically reaped from the pool,
	 * or may be reclaimed by the shrinker before then.
	 */
	struct intel_gt_buffer_pool buffer_pool;

	struct i915_vma *scratch;

	struct intel_migrate migrate;

	const struct intel_mmio_range *steering_table[NUM_STEERING_TYPES];

	struct {
		u8 groupid;
		u8 instanceid;
	} default_steering;

	/*
	 * Base of per-tile GTTMMADR, from which we can derive the MMIO
	 * and the GGTT.
	 */
	phys_addr_t phys_addr;

	struct intel_gt_info {
		unsigned int id;

		intel_engine_mask_t engine_mask;

		u32 l3bank_mask;

		u8 num_engines;

		/* General presence of SFC units */
		u8 sfc_mask;

		/* Media engine access to SFC per instance */
		u8 vdbox_sfc_access;

		/* Slice/subslice/EU info */
		struct sseu_dev_info sseu;

		unsigned long mslice_mask;

		/** @hwconfig: hardware configuration data */
		struct intel_hwconfig hwconfig;
	} info;

	struct {
		u8 uc_index;
	} mocs;

	struct intel_pxp pxp;

	/* gt/gtN sysfs */
	struct kobject sysfs_gt;
};

enum intel_gt_scratch_field {
	/* 8 bytes */
	INTEL_GT_SCRATCH_FIELD_DEFAULT = 0,

	/* 8 bytes */
	INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH = 128,

	/* 8 bytes */
	INTEL_GT_SCRATCH_FIELD_COHERENTL3_WA = 256,

	/* 6 * 8 bytes */
	INTEL_GT_SCRATCH_FIELD_PERF_CS_GPR = 2048,

	/* 4 bytes */
	INTEL_GT_SCRATCH_FIELD_PERF_PREDICATE_RESULT_1 = 2096,
};

#endif /* __INTEL_GT_TYPES__ */
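
/*
 * Illustration only, placed after the include guard so the header proper
 * is unchanged: the scratch fields above are byte offsets into the single
 * scratch vma held in struct intel_gt::scratch, so the GGTT address of a
 * slot is the vma's GGTT offset plus the field offset. This is a sketch
 * of the lookup; the driver defines a similar helper in intel_gt.h.
 */
static inline u32 intel_gt_scratch_offset_sketch(const struct intel_gt *gt,
						 enum intel_gt_scratch_field field)
{
	/* i915_ggtt_offset() comes from i915_vma.h, included above. */
	return i915_ggtt_offset(gt->scratch) + field;
}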