1 /* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
2 */
3 /*
4 *
5 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
6 * All Rights Reserved.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the
10 * "Software"), to deal in the Software without restriction, including
11 * without limitation the rights to use, copy, modify, merge, publish,
12 * distribute, sub license, and/or sell copies of the Software, and to
13 * permit persons to whom the Software is furnished to do so, subject to
14 * the following conditions:
15 *
16 * The above copyright notice and this permission notice (including the
17 * next paragraph) shall be included in all copies or substantial portions
18 * of the Software.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
21 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
22 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
23 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
24 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
25 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
26 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
27 *
28 */
29
30 #ifndef _I915_DRV_H_
31 #define _I915_DRV_H_
32
33 #include <uapi/drm/i915_drm.h>
34
35 #include <linux/pm_qos.h>
36
37 #include <drm/drm_connector.h>
38 #include <drm/ttm/ttm_device.h>
39
40 #include "display/intel_bios.h"
41 #include "display/intel_cdclk.h"
42 #include "display/intel_display.h"
43 #include "display/intel_display_power.h"
44 #include "display/intel_dmc.h"
45 #include "display/intel_dpll_mgr.h"
46 #include "display/intel_dsb.h"
47 #include "display/intel_fbc.h"
48 #include "display/intel_frontbuffer.h"
49 #include "display/intel_global_state.h"
50 #include "display/intel_gmbus.h"
51 #include "display/intel_opregion.h"
52
53 #include "gem/i915_gem_context_types.h"
54 #include "gem/i915_gem_lmem.h"
55 #include "gem/i915_gem_shrinker.h"
56 #include "gem/i915_gem_stolen.h"
57
58 #include "gt/intel_engine.h"
59 #include "gt/intel_gt_types.h"
60 #include "gt/intel_region_lmem.h"
61 #include "gt/intel_workarounds.h"
62 #include "gt/uc/intel_uc.h"
63
64 #include "i915_drm_client.h"
65 #include "i915_gem.h"
66 #include "i915_gpu_error.h"
67 #include "i915_params.h"
68 #include "i915_perf_types.h"
69 #include "i915_scheduler.h"
70 #include "i915_utils.h"
71 #include "intel_device_info.h"
72 #include "intel_memory_region.h"
73 #include "intel_pch.h"
74 #include "intel_pm_types.h"
75 #include "intel_runtime_pm.h"
76 #include "intel_step.h"
77 #include "intel_uncore.h"
78 #include "intel_wopcm.h"
79
80 struct dpll;
81 struct drm_i915_clock_gating_funcs;
82 struct drm_i915_gem_object;
83 struct drm_i915_private;
84 struct intel_atomic_state;
85 struct intel_audio_funcs;
86 struct intel_cdclk_config;
87 struct intel_cdclk_funcs;
88 struct intel_cdclk_state;
89 struct intel_cdclk_vals;
90 struct intel_color_funcs;
91 struct intel_connector;
92 struct intel_crtc;
93 struct intel_dp;
94 struct intel_dpll_funcs;
95 struct intel_encoder;
96 struct intel_fbdev;
97 struct intel_fdi_funcs;
98 struct intel_gmbus;
99 struct intel_hotplug_funcs;
100 struct intel_initial_plane_config;
101 struct intel_limit;
102 struct intel_overlay;
103 struct intel_overlay_error_state;
104 struct vlv_s0ix_state;
105
106 /* Threshold == 5 for long IRQs, 50 for short */
107 #define HPD_STORM_DEFAULT_THRESHOLD 50
108
109 struct i915_hotplug {
110 struct delayed_work hotplug_work;
111
112 const u32 *hpd, *pch_hpd;
113
114 struct {
115 unsigned long last_jiffies;
116 int count;
117 enum {
118 HPD_ENABLED = 0,
119 HPD_DISABLED = 1,
120 HPD_MARK_DISABLED = 2
121 } state;
122 } stats[HPD_NUM_PINS];
123 u32 event_bits;
124 u32 retry_bits;
125 struct delayed_work reenable_work;
126
127 u32 long_port_mask;
128 u32 short_port_mask;
129 struct work_struct dig_port_work;
130
131 struct work_struct poll_init_work;
132 bool poll_enabled;
133
134 unsigned int hpd_storm_threshold;
135 /* Whether or not to count short HPD IRQs in HPD storms */
136 u8 hpd_short_storm_enabled;
137
138 /*
139 	 * If we get an HPD irq from DP and an HPD irq from non-DP, the
140 	 * non-DP HPD handler could block the workqueue on acquiring a mode
141 	 * config mutex that userspace may already hold. However, userspace
142 	 * may be waiting on the DP workqueue to run, and that workqueue is
143 	 * blocked behind the non-DP one.
144 */
145 struct workqueue_struct *dp_wq;
146 };
147
148 #define I915_GEM_GPU_DOMAINS \
149 (I915_GEM_DOMAIN_RENDER | \
150 I915_GEM_DOMAIN_SAMPLER | \
151 I915_GEM_DOMAIN_COMMAND | \
152 I915_GEM_DOMAIN_INSTRUCTION | \
153 I915_GEM_DOMAIN_VERTEX)
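
/*
 * Usage sketch (illustrative only; "read_domains" is a hypothetical local):
 * the mask above is tested against a set of domain flags to ask whether any
 * GPU domain is involved, e.g.:
 *
 *	if (read_domains & I915_GEM_GPU_DOMAINS)
 *		drm_dbg(&i915->drm, "object lives in a GPU domain\n");
 */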
154
155 struct sdvo_device_mapping {
156 u8 initialized;
157 u8 dvo_port;
158 u8 slave_addr;
159 u8 dvo_wiring;
160 u8 i2c_pin;
161 u8 ddc_pin;
162 };
163
164 /* functions used for watermark calcs for display. */
165 struct drm_i915_wm_disp_funcs {
166 /* update_wm is for legacy wm management */
167 void (*update_wm)(struct drm_i915_private *dev_priv);
168 int (*compute_pipe_wm)(struct intel_atomic_state *state,
169 struct intel_crtc *crtc);
170 int (*compute_intermediate_wm)(struct intel_atomic_state *state,
171 struct intel_crtc *crtc);
172 void (*initial_watermarks)(struct intel_atomic_state *state,
173 struct intel_crtc *crtc);
174 void (*atomic_update_watermarks)(struct intel_atomic_state *state,
175 struct intel_crtc *crtc);
176 void (*optimize_watermarks)(struct intel_atomic_state *state,
177 struct intel_crtc *crtc);
178 int (*compute_global_watermarks)(struct intel_atomic_state *state);
179 };
180
181 struct drm_i915_display_funcs {
182 /* Returns the active state of the crtc, and if the crtc is active,
183 * fills out the pipe-config with the hw state. */
184 bool (*get_pipe_config)(struct intel_crtc *,
185 struct intel_crtc_state *);
186 void (*get_initial_plane_config)(struct intel_crtc *,
187 struct intel_initial_plane_config *);
188 void (*crtc_enable)(struct intel_atomic_state *state,
189 struct intel_crtc *crtc);
190 void (*crtc_disable)(struct intel_atomic_state *state,
191 struct intel_crtc *crtc);
192 void (*commit_modeset_enables)(struct intel_atomic_state *state);
193 };
194
195 #define I915_COLOR_UNEVICTABLE (-1) /* a non-vma sharing the address space */
196
197 enum drrs_type {
198 DRRS_TYPE_NONE,
199 DRRS_TYPE_STATIC,
200 DRRS_TYPE_SEAMLESS,
201 };
202
203 #define QUIRK_LVDS_SSC_DISABLE (1<<1)
204 #define QUIRK_INVERT_BRIGHTNESS (1<<2)
205 #define QUIRK_BACKLIGHT_PRESENT (1<<3)
206 #define QUIRK_PIN_SWIZZLED_PAGES (1<<5)
207 #define QUIRK_INCREASE_T12_DELAY (1<<6)
208 #define QUIRK_INCREASE_DDI_DISABLED_TIME (1<<7)
209 #define QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK (1<<8)
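
/*
 * Usage sketch (illustrative only; "level" and "max" are hypothetical
 * locals): quirks are ORed into i915->quirks during display init and then
 * tested as plain bit flags, e.g.:
 *
 *	if (dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS)
 *		level = max - level;
 */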
210
211 struct i915_suspend_saved_registers {
212 u32 saveDSPARB;
213 u32 saveSWF0[16];
214 u32 saveSWF1[16];
215 u32 saveSWF3[3];
216 u16 saveGCDGMBUS;
217 };
218
219 #define MAX_L3_SLICES 2
220 struct intel_l3_parity {
221 u32 *remap_info[MAX_L3_SLICES];
222 struct work_struct error_work;
223 int which_slice;
224 };
225
226 struct i915_gem_mm {
227 /*
228 * Shortcut for the stolen region. This points to either
229 * INTEL_REGION_STOLEN_SMEM for integrated platforms, or
230 * INTEL_REGION_STOLEN_LMEM for discrete, or NULL if the device doesn't
231 * support stolen.
232 */
233 struct intel_memory_region *stolen_region;
234 /** Memory allocator for GTT stolen memory */
235 struct drm_mm stolen;
236 /** Protects the usage of the GTT stolen memory allocator. This is
237 * always the inner lock when overlapping with struct_mutex. */
238 struct mutex stolen_lock;
239
240 /* Protects bound_list/unbound_list and #drm_i915_gem_object.mm.link */
241 spinlock_t obj_lock;
242
243 /**
244 * List of objects which are purgeable.
245 */
246 struct list_head purge_list;
247
248 /**
249 * List of objects which have allocated pages and are shrinkable.
250 */
251 struct list_head shrink_list;
252
253 /**
254 * List of objects which are pending destruction.
255 */
256 struct llist_head free_list;
257 struct work_struct free_work;
258 /**
259 	 * Count of objects pending destruction. Used to skip needlessly
260 * waiting on an RCU barrier if no objects are waiting to be freed.
261 */
262 atomic_t free_count;
263
264 /**
265 * tmpfs instance used for shmem backed objects
266 */
267 struct vfsmount *gemfs;
268
269 struct intel_memory_region *regions[INTEL_REGION_UNKNOWN];
270
271 struct notifier_block oom_notifier;
272 struct notifier_block vmap_notifier;
273 struct shrinker shrinker;
274
275 #ifdef CONFIG_MMU_NOTIFIER
276 /**
277 * notifier_lock for mmu notifiers, memory may not be allocated
278 * while holding this lock.
279 */
280 rwlock_t notifier_lock;
281 #endif
282
283 /* shrinker accounting, also useful for userland debugging */
284 u64 shrink_memory;
285 u32 shrink_count;
286 };
287
288 #define I915_IDLE_ENGINES_TIMEOUT (200) /* in ms */
289
290 unsigned long i915_fence_context_timeout(const struct drm_i915_private *i915,
291 u64 context);
292
293 static inline unsigned long
294 i915_fence_timeout(const struct drm_i915_private *i915)
295 {
296 return i915_fence_context_timeout(i915, U64_MAX);
297 }
298
299 /* Amount of SAGV/QGV points, BSpec precisely defines this */
300 #define I915_NUM_QGV_POINTS 8
301
302 #define HAS_HW_SAGV_WM(i915) (DISPLAY_VER(i915) >= 13 && !IS_DGFX(i915))
303
304 /* Amount of PSF GV points, BSpec precisely defines this */
305 #define I915_NUM_PSF_GV_POINTS 3
306
307 struct intel_vbt_data {
308 /* bdb version */
309 u16 version;
310
311 struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
312 struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
313
314 /* Feature bits */
315 unsigned int int_tv_support:1;
316 unsigned int lvds_dither:1;
317 unsigned int int_crt_support:1;
318 unsigned int lvds_use_ssc:1;
319 unsigned int int_lvds_support:1;
320 unsigned int display_clock_mode:1;
321 unsigned int fdi_rx_polarity_inverted:1;
322 unsigned int panel_type:4;
323 int lvds_ssc_freq;
324 unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
325 enum drm_panel_orientation orientation;
326
327 bool override_afc_startup;
328 u8 override_afc_startup_val;
329
330 u8 seamless_drrs_min_refresh_rate;
331 enum drrs_type drrs_type;
332
333 struct {
334 int rate;
335 int lanes;
336 int preemphasis;
337 int vswing;
338 int bpp;
339 struct edp_power_seq pps;
340 u8 drrs_msa_timing_delay;
341 bool low_vswing;
342 bool initialized;
343 bool hobl;
344 } edp;
345
346 struct {
347 bool enable;
348 bool full_link;
349 bool require_aux_wakeup;
350 int idle_frames;
351 int tp1_wakeup_time_us;
352 int tp2_tp3_wakeup_time_us;
353 int psr2_tp2_tp3_wakeup_time_us;
354 } psr;
355
356 struct {
357 u16 pwm_freq_hz;
358 u16 brightness_precision_bits;
359 bool present;
360 bool active_low_pwm;
361 u8 min_brightness; /* min_brightness/255 of max */
362 u8 controller; /* brightness controller number */
363 enum intel_backlight_type type;
364 } backlight;
365
366 /* MIPI DSI */
367 struct {
368 u16 panel_id;
369 struct mipi_config *config;
370 struct mipi_pps_data *pps;
371 u16 bl_ports;
372 u16 cabc_ports;
373 u8 seq_version;
374 u32 size;
375 u8 *data;
376 const u8 *sequence[MIPI_SEQ_MAX];
377 u8 *deassert_seq; /* Used by fixup_mipi_sequences() */
378 enum drm_panel_orientation orientation;
379 } dsi;
380
381 int crt_ddc_pin;
382
383 struct list_head display_devices;
384 struct list_head bdb_blocks;
385
386 struct intel_bios_encoder_data *ports[I915_MAX_PORTS]; /* Non-NULL if port present. */
387 struct sdvo_device_mapping sdvo_mappings[2];
388 };
389
390 struct i915_frontbuffer_tracking {
391 spinlock_t lock;
392
393 /*
394 	 * Tracking bits for delayed frontbuffer flushing due to GPU activity or
395 * scheduled flips.
396 */
397 unsigned busy_bits;
398 unsigned flip_bits;
399 };
400
401 struct i915_virtual_gpu {
402 struct mutex lock; /* serialises sending of g2v_notify command pkts */
403 bool active;
404 u32 caps;
405 u32 *initial_mmio;
406 u8 *initial_cfg_space;
407 struct list_head entry;
408 };
409
410 struct i915_selftest_stash {
411 atomic_t counter;
412 struct ida mock_region_instances;
413 };
414
415 /* intel_audio.c private */
416 struct intel_audio_private {
417 /* Display internal audio functions */
418 const struct intel_audio_funcs *funcs;
419
420 /* hda/i915 audio component */
421 struct i915_audio_component *component;
422 bool component_registered;
423 /* mutex for audio/video sync */
424 struct mutex mutex;
425 int power_refcount;
426 u32 freq_cntrl;
427
428 /* Used to save the pipe-to-encoder mapping for audio */
429 struct intel_encoder *encoder_map[I915_MAX_PIPES];
430
431 /* necessary resource sharing with HDMI LPE audio driver. */
432 struct {
433 struct platform_device *platdev;
434 int irq;
435 } lpe;
436 };
437
438 struct drm_i915_private {
439 struct drm_device drm;
440
441 /* FIXME: Device release actions should all be moved to drmm_ */
442 bool do_release;
443
444 /* i915 device parameters */
445 struct i915_params params;
446
447 const struct intel_device_info __info; /* Use INTEL_INFO() to access. */
448 struct intel_runtime_info __runtime; /* Use RUNTIME_INFO() to access. */
449 struct intel_driver_caps caps;
450
451 /**
452 * Data Stolen Memory - aka "i915 stolen memory" gives us the start and
453 * end of stolen which we can optionally use to create GEM objects
454 * backed by stolen memory. Note that stolen_usable_size tells us
455 * exactly how much of this we are actually allowed to use, given that
456 * some portion of it is in fact reserved for use by hardware functions.
457 */
458 struct resource dsm;
459 /**
460 	 * Reserved portion of Data Stolen Memory
461 */
462 struct resource dsm_reserved;
463
464 /*
465 * Stolen memory is segmented in hardware with different portions
466 	 * off-limits to certain functions.
467 *
468 * The drm_mm is initialised to the total accessible range, as found
469 * from the PCI config. On Broadwell+, this is further restricted to
470 * avoid the first page! The upper end of stolen memory is reserved for
471 * hardware functions and similarly removed from the accessible range.
472 */
473 resource_size_t stolen_usable_size; /* Total size minus reserved ranges */
474
475 struct intel_uncore uncore;
476 struct intel_uncore_mmio_debug mmio_debug;
477
478 struct i915_virtual_gpu vgpu;
479
480 struct intel_gvt *gvt;
481
482 struct intel_wopcm wopcm;
483
484 struct intel_dmc dmc;
485
486 struct intel_gmbus *gmbus[GMBUS_NUM_PINS];
487
488 /** gmbus_mutex protects against concurrent usage of the single hw gmbus
489 * controller on different i2c buses. */
490 struct mutex gmbus_mutex;
491
492 /**
493 * Base address of where the gmbus and gpio blocks are located (either
494 * on PCH or on SoC for platforms without PCH).
495 */
496 u32 gpio_mmio_base;
497
498 /* MMIO base address for MIPI regs */
499 u32 mipi_mmio_base;
500
501 u32 pps_mmio_base;
502
503 wait_queue_head_t gmbus_wait_queue;
504
505 struct pci_dev *bridge_dev;
506
507 struct rb_root uabi_engines;
508 unsigned int engine_uabi_class_count[I915_LAST_UABI_ENGINE_CLASS + 1];
509
510 struct resource mch_res;
511
512 /* protects the irq masks */
513 spinlock_t irq_lock;
514
515 bool display_irqs_enabled;
516
517 /* Sideband mailbox protection */
518 struct mutex sb_lock;
519 struct pm_qos_request sb_qos;
520
521 /** Cached value of IMR to avoid reads in updating the bitfield */
522 union {
523 u32 irq_mask;
524 u32 de_irq_mask[I915_MAX_PIPES];
525 };
526 u32 pipestat_irq_mask[I915_MAX_PIPES];
527
528 struct i915_hotplug hotplug;
529 struct intel_fbc *fbc[I915_MAX_FBCS];
530 struct intel_opregion opregion;
531 struct intel_vbt_data vbt;
532
533 bool preserve_bios_swizzle;
534
535 /* overlay */
536 struct intel_overlay *overlay;
537
538 /* backlight registers and fields in struct intel_panel */
539 struct mutex backlight_lock;
540
541 /* protects panel power sequencer state */
542 struct mutex pps_mutex;
543
544 unsigned int fsb_freq, mem_freq, is_ddr3;
545 unsigned int skl_preferred_vco_freq;
546 unsigned int max_cdclk_freq;
547
548 unsigned int max_dotclk_freq;
549 unsigned int hpll_freq;
550 unsigned int fdi_pll_freq;
551 unsigned int czclk_freq;
552
553 struct {
554 /* The current hardware cdclk configuration */
555 struct intel_cdclk_config hw;
556
557 /* cdclk, divider, and ratio table from bspec */
558 const struct intel_cdclk_vals *table;
559
560 struct intel_global_obj obj;
561 } cdclk;
562
563 struct {
564 /* The current hardware dbuf configuration */
565 u8 enabled_slices;
566
567 struct intel_global_obj obj;
568 } dbuf;
569
570 /**
571 * wq - Driver workqueue for GEM.
572 *
573 * NOTE: Work items scheduled here are not allowed to grab any modeset
574 * locks, for otherwise the flushing done in the pageflip code will
575 * result in deadlocks.
576 */
577 struct workqueue_struct *wq;
578
579 /* ordered wq for modesets */
580 struct workqueue_struct *modeset_wq;
581 /* unbound hipri wq for page flips/plane updates */
582 struct workqueue_struct *flip_wq;
583
584 /* pm private clock gating functions */
585 const struct drm_i915_clock_gating_funcs *clock_gating_funcs;
586
587 /* pm display functions */
588 const struct drm_i915_wm_disp_funcs *wm_disp;
589
590 /* irq display functions */
591 const struct intel_hotplug_funcs *hotplug_funcs;
592
593 /* fdi display functions */
594 const struct intel_fdi_funcs *fdi_funcs;
595
596 /* display pll funcs */
597 const struct intel_dpll_funcs *dpll_funcs;
598
599 /* Display functions */
600 const struct drm_i915_display_funcs *display;
601
602 /* Display internal color functions */
603 const struct intel_color_funcs *color_funcs;
604
605 /* Display CDCLK functions */
606 const struct intel_cdclk_funcs *cdclk_funcs;
607
608 /* PCH chipset type */
609 enum intel_pch pch_type;
610 unsigned short pch_id;
611
612 unsigned long quirks;
613
614 struct drm_atomic_state *modeset_restore_state;
615 struct drm_modeset_acquire_ctx reset_ctx;
616
617 struct i915_gem_mm mm;
618
619 /* Kernel Modesetting */
620
621 /**
622 * dpll and cdclk state is protected by connection_mutex
623 * dpll.lock serializes intel_{prepare,enable,disable}_shared_dpll.
624 * Must be global rather than per dpll, because on some platforms plls
625 * share registers.
626 */
627 struct {
628 struct mutex lock;
629
630 int num_shared_dpll;
631 struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
632 const struct intel_dpll_mgr *mgr;
633
634 struct {
635 int nssc;
636 int ssc;
637 } ref_clks;
638 } dpll;
639
640 struct list_head global_obj_list;
641
642 struct i915_frontbuffer_tracking fb_tracking;
643
644 struct intel_atomic_helper {
645 struct llist_head free_list;
646 struct work_struct free_work;
647 } atomic_helper;
648
649 bool mchbar_need_disable;
650
651 struct intel_l3_parity l3_parity;
652
653 /*
654 * HTI (aka HDPORT) state read during initial hw readout. Most
655 * platforms don't have HTI, so this will just stay 0. Those that do
656 * will use this later to figure out which PLLs and PHYs are unavailable
657 * for driver usage.
658 */
659 u32 hti_state;
660
661 /*
662 * edram size in MB.
663 * Cannot be determined by PCIID. You must always read a register.
664 */
665 u32 edram_size_mb;
666
667 struct i915_power_domains power_domains;
668
669 struct i915_gpu_error gpu_error;
670
671 /* list of fbdev register on this device */
672 struct intel_fbdev *fbdev;
673 struct work_struct fbdev_suspend_work;
674
675 struct drm_property *broadcast_rgb_property;
676 struct drm_property *force_audio_property;
677
678 u32 fdi_rx_config;
679
680 /* Shadow for DISPLAY_PHY_CONTROL which can't be safely read */
681 u32 chv_phy_control;
682 /*
683 * Shadows for CHV DPLL_MD regs to keep the state
684 	 * checker somewhat working in the presence of hardware
685 * crappiness (can't read out DPLL_MD for pipes B & C).
686 */
687 u32 chv_dpll_md[I915_MAX_PIPES];
688 u32 bxt_phy_grc;
689
690 u32 suspend_count;
691 struct i915_suspend_saved_registers regfile;
692 struct vlv_s0ix_state *vlv_s0ix_state;
693
694 enum {
695 I915_SAGV_UNKNOWN = 0,
696 I915_SAGV_DISABLED,
697 I915_SAGV_ENABLED,
698 I915_SAGV_NOT_CONTROLLED
699 } sagv_status;
700
701 u32 sagv_block_time_us;
702
703 struct {
704 /*
705 * Raw watermark latency values:
706 * in 0.1us units for WM0,
707 * in 0.5us units for WM1+.
708 */
709 /* primary */
710 u16 pri_latency[5];
711 /* sprite */
712 u16 spr_latency[5];
713 /* cursor */
714 u16 cur_latency[5];
715 /*
716 * Raw watermark memory latency values
717 * for SKL for all 8 levels
718 * in 1us units.
719 */
720 u16 skl_latency[8];
721
722 /* current hardware state */
723 union {
724 struct ilk_wm_values hw;
725 struct vlv_wm_values vlv;
726 struct g4x_wm_values g4x;
727 };
728
729 u8 max_level;
730
731 /*
732 * Should be held around atomic WM register writing; also
733 		 * protects intel_crtc->wm.active and
734 * crtc_state->wm.need_postvbl_update.
735 */
736 struct mutex wm_mutex;
737 } wm;
738
739 struct dram_info {
740 bool wm_lv_0_adjust_needed;
741 u8 num_channels;
742 bool symmetric_memory;
743 enum intel_dram_type {
744 INTEL_DRAM_UNKNOWN,
745 INTEL_DRAM_DDR3,
746 INTEL_DRAM_DDR4,
747 INTEL_DRAM_LPDDR3,
748 INTEL_DRAM_LPDDR4,
749 INTEL_DRAM_DDR5,
750 INTEL_DRAM_LPDDR5,
751 } type;
752 u8 num_qgv_points;
753 u8 num_psf_gv_points;
754 } dram_info;
755
756 struct intel_bw_info {
757 /* for each QGV point */
758 unsigned int deratedbw[I915_NUM_QGV_POINTS];
759 /* for each PSF GV point */
760 unsigned int psf_bw[I915_NUM_PSF_GV_POINTS];
761 u8 num_qgv_points;
762 u8 num_psf_gv_points;
763 u8 num_planes;
764 } max_bw[6];
765
766 struct intel_global_obj bw_obj;
767
768 struct intel_runtime_pm runtime_pm;
769
770 struct i915_perf perf;
771
772 /* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
773 struct intel_gt gt0;
774
775 /*
776 * i915->gt[0] == &i915->gt0
777 */
778 #define I915_MAX_GT 4
779 struct intel_gt *gt[I915_MAX_GT];
780
781 struct kobject *sysfs_gt;
782
783 struct {
784 struct i915_gem_contexts {
785 spinlock_t lock; /* locks list */
786 struct list_head list;
787 } contexts;
788
789 /*
790 		 * We replace the local file with a global mapping as the
791 * backing storage for the mmap is on the device and not
792 * on the struct file, and we do not want to prolong the
793 * lifetime of the local fd. To minimise the number of
794 * anonymous inodes we create, we use a global singleton to
795 * share the global mapping.
796 */
797 struct file *mmap_singleton;
798 } gem;
799
800 /* Window2 specifies time required to program DSB (Window2) in number of scan lines */
801 u8 window2_delay;
802
803 u8 pch_ssc_use;
804
805 /* For i915gm/i945gm vblank irq workaround */
806 u8 vblank_enabled;
807
808 bool irq_enabled;
809
810 union {
811 /* perform PHY state sanity checks? */
812 bool chv_phy_assert[2];
813
814 /*
815 * DG2: Mask of PHYs that were not calibrated by the firmware
816 * and should not be used.
817 */
818 u8 snps_phy_failed_calibration;
819 };
820
821 bool ipc_enabled;
822
823 struct intel_audio_private audio;
824
825 struct i915_pmu pmu;
826
827 struct i915_drm_clients clients;
828
829 struct i915_hdcp_comp_master *hdcp_master;
830 bool hdcp_comp_added;
831
832 /* Mutex to protect the above hdcp component related values. */
833 struct mutex hdcp_comp_mutex;
834
835 /* The TTM device structure. */
836 struct ttm_device bdev;
837
838 I915_SELFTEST_DECLARE(struct i915_selftest_stash selftest;)
839
840 /*
841 * NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
842 * will be rejected. Instead look for a better place.
843 */
844 };
845
846 static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
847 {
848 return container_of(dev, struct drm_i915_private, drm);
849 }
850
851 static inline struct drm_i915_private *kdev_to_i915(struct device *kdev)
852 {
853 return dev_get_drvdata(kdev);
854 }
855
856 static inline struct drm_i915_private *pdev_to_i915(struct pci_dev *pdev)
857 {
858 return pci_get_drvdata(pdev);
859 }
860
861 static inline struct intel_gt *to_gt(struct drm_i915_private *i915)
862 {
863 return &i915->gt0;
864 }
865
866 /* Simple iterator over all initialised engines */
867 #define for_each_engine(engine__, dev_priv__, id__) \
868 for ((id__) = 0; \
869 (id__) < I915_NUM_ENGINES; \
870 (id__)++) \
871 for_each_if ((engine__) = (dev_priv__)->engine[(id__)])
872
873 /* Iterator over subset of engines selected by mask */
874 #define for_each_engine_masked(engine__, gt__, mask__, tmp__) \
875 for ((tmp__) = (mask__) & (gt__)->info.engine_mask; \
876 (tmp__) ? \
877 ((engine__) = (gt__)->engine[__mask_next_bit(tmp__)]), 1 : \
878 0;)
879
880 #define rb_to_uabi_engine(rb) \
881 rb_entry_safe(rb, struct intel_engine_cs, uabi_node)
882
883 #define for_each_uabi_engine(engine__, i915__) \
884 for ((engine__) = rb_to_uabi_engine(rb_first(&(i915__)->uabi_engines));\
885 (engine__); \
886 (engine__) = rb_to_uabi_engine(rb_next(&(engine__)->uabi_node)))
887
888 #define for_each_uabi_class_engine(engine__, class__, i915__) \
889 for ((engine__) = intel_engine_lookup_user((i915__), (class__), 0); \
890 (engine__) && (engine__)->uabi_class == (class__); \
891 (engine__) = rb_to_uabi_engine(rb_next(&(engine__)->uabi_node)))
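
/*
 * Usage sketch (illustrative only; "count_user_vcs_engines" is a hypothetical
 * helper): the uabi iterators walk the engines exposed to userspace in ABI
 * order, e.g. counting the video engines of a device:
 *
 *	static unsigned int count_user_vcs_engines(struct drm_i915_private *i915)
 *	{
 *		struct intel_engine_cs *engine;
 *		unsigned int count = 0;
 *
 *		for_each_uabi_class_engine(engine, I915_ENGINE_CLASS_VIDEO, i915)
 *			count++;
 *
 *		return count;
 *	}
 */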
892
893 #define I915_GTT_OFFSET_NONE ((u32)-1)
894
895 /*
896 * Frontbuffer tracking bits. Set in obj->frontbuffer_bits while a gem bo is
897 * considered to be the frontbuffer for the given plane interface-wise. This
898 * doesn't mean that the hw necessarily already scans it out, but that any
899 * rendering (by the cpu or gpu) will land in the frontbuffer eventually.
900 *
901 * We have one bit per pipe and per scanout plane type.
902 */
903 #define INTEL_FRONTBUFFER_BITS_PER_PIPE 8
904 #define INTEL_FRONTBUFFER(pipe, plane_id) ({ \
905 BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES > 32); \
906 BUILD_BUG_ON(I915_MAX_PLANES > INTEL_FRONTBUFFER_BITS_PER_PIPE); \
907 BIT((plane_id) + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)); \
908 })
909 #define INTEL_FRONTBUFFER_OVERLAY(pipe) \
910 BIT(INTEL_FRONTBUFFER_BITS_PER_PIPE - 1 + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))
911 #define INTEL_FRONTBUFFER_ALL_MASK(pipe) \
912 GENMASK(INTEL_FRONTBUFFER_BITS_PER_PIPE * ((pipe) + 1) - 1, \
913 INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))
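
/*
 * Worked example (illustrative only, assuming the usual enum values
 * PIPE_B == 1 and PLANE_PRIMARY == 0): with 8 bits per pipe,
 * INTEL_FRONTBUFFER(PIPE_B, PLANE_PRIMARY) == BIT(8), and
 * INTEL_FRONTBUFFER_ALL_MASK(PIPE_B) covers bits 8..15 of
 * obj->frontbuffer_bits.
 */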
914
915 #define INTEL_INFO(dev_priv) (&(dev_priv)->__info)
916 #define RUNTIME_INFO(dev_priv) (&(dev_priv)->__runtime)
917 #define DRIVER_CAPS(dev_priv) (&(dev_priv)->caps)
918
919 #define INTEL_DEVID(dev_priv) (RUNTIME_INFO(dev_priv)->device_id)
920
921 #define IP_VER(ver, rel) ((ver) << 8 | (rel))
922
923 #define GRAPHICS_VER(i915) (INTEL_INFO(i915)->graphics.ver)
924 #define GRAPHICS_VER_FULL(i915) IP_VER(INTEL_INFO(i915)->graphics.ver, \
925 INTEL_INFO(i915)->graphics.rel)
926 #define IS_GRAPHICS_VER(i915, from, until) \
927 (GRAPHICS_VER(i915) >= (from) && GRAPHICS_VER(i915) <= (until))
928
929 #define MEDIA_VER(i915) (INTEL_INFO(i915)->media.ver)
930 #define MEDIA_VER_FULL(i915) IP_VER(INTEL_INFO(i915)->media.ver, \
931 INTEL_INFO(i915)->media.rel)
932 #define IS_MEDIA_VER(i915, from, until) \
933 (MEDIA_VER(i915) >= (from) && MEDIA_VER(i915) <= (until))
934
935 #define DISPLAY_VER(i915) (INTEL_INFO(i915)->display.ver)
936 #define IS_DISPLAY_VER(i915, from, until) \
937 (DISPLAY_VER(i915) >= (from) && DISPLAY_VER(i915) <= (until))
938
939 #define INTEL_REVID(dev_priv) (to_pci_dev((dev_priv)->drm.dev)->revision)
940
941 #define HAS_DSB(dev_priv) (INTEL_INFO(dev_priv)->display.has_dsb)
942
943 #define INTEL_DISPLAY_STEP(__i915) (RUNTIME_INFO(__i915)->step.display_step)
944 #define INTEL_GRAPHICS_STEP(__i915) (RUNTIME_INFO(__i915)->step.graphics_step)
945 #define INTEL_MEDIA_STEP(__i915) (RUNTIME_INFO(__i915)->step.media_step)
946
947 #define IS_DISPLAY_STEP(__i915, since, until) \
948 (drm_WARN_ON(&(__i915)->drm, INTEL_DISPLAY_STEP(__i915) == STEP_NONE), \
949 INTEL_DISPLAY_STEP(__i915) >= (since) && INTEL_DISPLAY_STEP(__i915) < (until))
950
951 #define IS_GRAPHICS_STEP(__i915, since, until) \
952 (drm_WARN_ON(&(__i915)->drm, INTEL_GRAPHICS_STEP(__i915) == STEP_NONE), \
953 INTEL_GRAPHICS_STEP(__i915) >= (since) && INTEL_GRAPHICS_STEP(__i915) < (until))
954
955 #define IS_MEDIA_STEP(__i915, since, until) \
956 (drm_WARN_ON(&(__i915)->drm, INTEL_MEDIA_STEP(__i915) == STEP_NONE), \
957 INTEL_MEDIA_STEP(__i915) >= (since) && INTEL_MEDIA_STEP(__i915) < (until))
958
959 static __always_inline unsigned int
960 __platform_mask_index(const struct intel_runtime_info *info,
961 enum intel_platform p)
962 {
963 const unsigned int pbits =
964 BITS_PER_TYPE(info->platform_mask[0]) - INTEL_SUBPLATFORM_BITS;
965
966 /* Expand the platform_mask array if this fails. */
967 BUILD_BUG_ON(INTEL_MAX_PLATFORMS >
968 pbits * ARRAY_SIZE(info->platform_mask));
969
970 return p / pbits;
971 }
972
973 static __always_inline unsigned int
974 __platform_mask_bit(const struct intel_runtime_info *info,
975 enum intel_platform p)
976 {
977 const unsigned int pbits =
978 BITS_PER_TYPE(info->platform_mask[0]) - INTEL_SUBPLATFORM_BITS;
979
980 return p % pbits + INTEL_SUBPLATFORM_BITS;
981 }
982
983 static inline u32
984 intel_subplatform(const struct intel_runtime_info *info, enum intel_platform p)
985 {
986 const unsigned int pi = __platform_mask_index(info, p);
987
988 return info->platform_mask[pi] & INTEL_SUBPLATFORM_MASK;
989 }
990
991 static __always_inline bool
992 IS_PLATFORM(const struct drm_i915_private *i915, enum intel_platform p)
993 {
994 const struct intel_runtime_info *info = RUNTIME_INFO(i915);
995 const unsigned int pi = __platform_mask_index(info, p);
996 const unsigned int pb = __platform_mask_bit(info, p);
997
998 BUILD_BUG_ON(!__builtin_constant_p(p));
999
1000 return info->platform_mask[pi] & BIT(pb);
1001 }
1002
1003 static __always_inline bool
1004 IS_SUBPLATFORM(const struct drm_i915_private *i915,
1005 enum intel_platform p, unsigned int s)
1006 {
1007 const struct intel_runtime_info *info = RUNTIME_INFO(i915);
1008 const unsigned int pi = __platform_mask_index(info, p);
1009 const unsigned int pb = __platform_mask_bit(info, p);
1010 const unsigned int msb = BITS_PER_TYPE(info->platform_mask[0]) - 1;
1011 const u32 mask = info->platform_mask[pi];
1012
1013 BUILD_BUG_ON(!__builtin_constant_p(p));
1014 BUILD_BUG_ON(!__builtin_constant_p(s));
1015 BUILD_BUG_ON((s) >= INTEL_SUBPLATFORM_BITS);
1016
1017 /* Shift and test on the MSB position so sign flag can be used. */
1018 return ((mask << (msb - pb)) & (mask << (msb - s))) & BIT(msb);
1019 }
1020
1021 #define IS_MOBILE(dev_priv) (INTEL_INFO(dev_priv)->is_mobile)
1022 #define IS_DGFX(dev_priv) (INTEL_INFO(dev_priv)->is_dgfx)
1023
1024 #define IS_I830(dev_priv) IS_PLATFORM(dev_priv, INTEL_I830)
1025 #define IS_I845G(dev_priv) IS_PLATFORM(dev_priv, INTEL_I845G)
1026 #define IS_I85X(dev_priv) IS_PLATFORM(dev_priv, INTEL_I85X)
1027 #define IS_I865G(dev_priv) IS_PLATFORM(dev_priv, INTEL_I865G)
1028 #define IS_I915G(dev_priv) IS_PLATFORM(dev_priv, INTEL_I915G)
1029 #define IS_I915GM(dev_priv) IS_PLATFORM(dev_priv, INTEL_I915GM)
1030 #define IS_I945G(dev_priv) IS_PLATFORM(dev_priv, INTEL_I945G)
1031 #define IS_I945GM(dev_priv) IS_PLATFORM(dev_priv, INTEL_I945GM)
1032 #define IS_I965G(dev_priv) IS_PLATFORM(dev_priv, INTEL_I965G)
1033 #define IS_I965GM(dev_priv) IS_PLATFORM(dev_priv, INTEL_I965GM)
1034 #define IS_G45(dev_priv) IS_PLATFORM(dev_priv, INTEL_G45)
1035 #define IS_GM45(dev_priv) IS_PLATFORM(dev_priv, INTEL_GM45)
1036 #define IS_G4X(dev_priv) (IS_G45(dev_priv) || IS_GM45(dev_priv))
1037 #define IS_PINEVIEW(dev_priv) IS_PLATFORM(dev_priv, INTEL_PINEVIEW)
1038 #define IS_G33(dev_priv) IS_PLATFORM(dev_priv, INTEL_G33)
1039 #define IS_IRONLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_IRONLAKE)
1040 #define IS_IRONLAKE_M(dev_priv) \
1041 (IS_PLATFORM(dev_priv, INTEL_IRONLAKE) && IS_MOBILE(dev_priv))
1042 #define IS_SANDYBRIDGE(dev_priv) IS_PLATFORM(dev_priv, INTEL_SANDYBRIDGE)
1043 #define IS_IVYBRIDGE(dev_priv) IS_PLATFORM(dev_priv, INTEL_IVYBRIDGE)
1044 #define IS_IVB_GT1(dev_priv) (IS_IVYBRIDGE(dev_priv) && \
1045 INTEL_INFO(dev_priv)->gt == 1)
1046 #define IS_VALLEYVIEW(dev_priv) IS_PLATFORM(dev_priv, INTEL_VALLEYVIEW)
1047 #define IS_CHERRYVIEW(dev_priv) IS_PLATFORM(dev_priv, INTEL_CHERRYVIEW)
1048 #define IS_HASWELL(dev_priv) IS_PLATFORM(dev_priv, INTEL_HASWELL)
1049 #define IS_BROADWELL(dev_priv) IS_PLATFORM(dev_priv, INTEL_BROADWELL)
1050 #define IS_SKYLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_SKYLAKE)
1051 #define IS_BROXTON(dev_priv) IS_PLATFORM(dev_priv, INTEL_BROXTON)
1052 #define IS_KABYLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_KABYLAKE)
1053 #define IS_GEMINILAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_GEMINILAKE)
1054 #define IS_COFFEELAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_COFFEELAKE)
1055 #define IS_COMETLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_COMETLAKE)
1056 #define IS_ICELAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_ICELAKE)
1057 #define IS_JSL_EHL(dev_priv) (IS_PLATFORM(dev_priv, INTEL_JASPERLAKE) || \
1058 IS_PLATFORM(dev_priv, INTEL_ELKHARTLAKE))
1059 #define IS_TIGERLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_TIGERLAKE)
1060 #define IS_ROCKETLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_ROCKETLAKE)
1061 #define IS_DG1(dev_priv) IS_PLATFORM(dev_priv, INTEL_DG1)
1062 #define IS_ALDERLAKE_S(dev_priv) IS_PLATFORM(dev_priv, INTEL_ALDERLAKE_S)
1063 #define IS_ALDERLAKE_P(dev_priv) IS_PLATFORM(dev_priv, INTEL_ALDERLAKE_P)
1064 #define IS_XEHPSDV(dev_priv) IS_PLATFORM(dev_priv, INTEL_XEHPSDV)
1065 #define IS_DG2(dev_priv) IS_PLATFORM(dev_priv, INTEL_DG2)
1066 #define IS_PONTEVECCHIO(dev_priv) IS_PLATFORM(dev_priv, INTEL_PONTEVECCHIO)
1067
1068 #define IS_DG2_G10(dev_priv) \
1069 IS_SUBPLATFORM(dev_priv, INTEL_DG2, INTEL_SUBPLATFORM_G10)
1070 #define IS_DG2_G11(dev_priv) \
1071 IS_SUBPLATFORM(dev_priv, INTEL_DG2, INTEL_SUBPLATFORM_G11)
1072 #define IS_DG2_G12(dev_priv) \
1073 IS_SUBPLATFORM(dev_priv, INTEL_DG2, INTEL_SUBPLATFORM_G12)
1074 #define IS_ADLS_RPLS(dev_priv) \
1075 IS_SUBPLATFORM(dev_priv, INTEL_ALDERLAKE_S, INTEL_SUBPLATFORM_RPL)
1076 #define IS_ADLP_N(dev_priv) \
1077 IS_SUBPLATFORM(dev_priv, INTEL_ALDERLAKE_P, INTEL_SUBPLATFORM_N)
1078 #define IS_ADLP_RPLP(dev_priv) \
1079 IS_SUBPLATFORM(dev_priv, INTEL_ALDERLAKE_P, INTEL_SUBPLATFORM_RPL)
1080 #define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \
1081 (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00)
1082 #define IS_BDW_ULT(dev_priv) \
1083 IS_SUBPLATFORM(dev_priv, INTEL_BROADWELL, INTEL_SUBPLATFORM_ULT)
1084 #define IS_BDW_ULX(dev_priv) \
1085 IS_SUBPLATFORM(dev_priv, INTEL_BROADWELL, INTEL_SUBPLATFORM_ULX)
1086 #define IS_BDW_GT3(dev_priv) (IS_BROADWELL(dev_priv) && \
1087 INTEL_INFO(dev_priv)->gt == 3)
1088 #define IS_HSW_ULT(dev_priv) \
1089 IS_SUBPLATFORM(dev_priv, INTEL_HASWELL, INTEL_SUBPLATFORM_ULT)
1090 #define IS_HSW_GT3(dev_priv) (IS_HASWELL(dev_priv) && \
1091 INTEL_INFO(dev_priv)->gt == 3)
1092 #define IS_HSW_GT1(dev_priv) (IS_HASWELL(dev_priv) && \
1093 INTEL_INFO(dev_priv)->gt == 1)
1094 /* ULX machines are also considered ULT. */
1095 #define IS_HSW_ULX(dev_priv) \
1096 IS_SUBPLATFORM(dev_priv, INTEL_HASWELL, INTEL_SUBPLATFORM_ULX)
1097 #define IS_SKL_ULT(dev_priv) \
1098 IS_SUBPLATFORM(dev_priv, INTEL_SKYLAKE, INTEL_SUBPLATFORM_ULT)
1099 #define IS_SKL_ULX(dev_priv) \
1100 IS_SUBPLATFORM(dev_priv, INTEL_SKYLAKE, INTEL_SUBPLATFORM_ULX)
1101 #define IS_KBL_ULT(dev_priv) \
1102 IS_SUBPLATFORM(dev_priv, INTEL_KABYLAKE, INTEL_SUBPLATFORM_ULT)
1103 #define IS_KBL_ULX(dev_priv) \
1104 IS_SUBPLATFORM(dev_priv, INTEL_KABYLAKE, INTEL_SUBPLATFORM_ULX)
1105 #define IS_SKL_GT2(dev_priv) (IS_SKYLAKE(dev_priv) && \
1106 INTEL_INFO(dev_priv)->gt == 2)
1107 #define IS_SKL_GT3(dev_priv) (IS_SKYLAKE(dev_priv) && \
1108 INTEL_INFO(dev_priv)->gt == 3)
1109 #define IS_SKL_GT4(dev_priv) (IS_SKYLAKE(dev_priv) && \
1110 INTEL_INFO(dev_priv)->gt == 4)
1111 #define IS_KBL_GT2(dev_priv) (IS_KABYLAKE(dev_priv) && \
1112 INTEL_INFO(dev_priv)->gt == 2)
1113 #define IS_KBL_GT3(dev_priv) (IS_KABYLAKE(dev_priv) && \
1114 INTEL_INFO(dev_priv)->gt == 3)
1115 #define IS_CFL_ULT(dev_priv) \
1116 IS_SUBPLATFORM(dev_priv, INTEL_COFFEELAKE, INTEL_SUBPLATFORM_ULT)
1117 #define IS_CFL_ULX(dev_priv) \
1118 IS_SUBPLATFORM(dev_priv, INTEL_COFFEELAKE, INTEL_SUBPLATFORM_ULX)
1119 #define IS_CFL_GT2(dev_priv) (IS_COFFEELAKE(dev_priv) && \
1120 INTEL_INFO(dev_priv)->gt == 2)
1121 #define IS_CFL_GT3(dev_priv) (IS_COFFEELAKE(dev_priv) && \
1122 INTEL_INFO(dev_priv)->gt == 3)
1123
1124 #define IS_CML_ULT(dev_priv) \
1125 IS_SUBPLATFORM(dev_priv, INTEL_COMETLAKE, INTEL_SUBPLATFORM_ULT)
1126 #define IS_CML_ULX(dev_priv) \
1127 IS_SUBPLATFORM(dev_priv, INTEL_COMETLAKE, INTEL_SUBPLATFORM_ULX)
1128 #define IS_CML_GT2(dev_priv) (IS_COMETLAKE(dev_priv) && \
1129 INTEL_INFO(dev_priv)->gt == 2)
1130
1131 #define IS_ICL_WITH_PORT_F(dev_priv) \
1132 IS_SUBPLATFORM(dev_priv, INTEL_ICELAKE, INTEL_SUBPLATFORM_PORTF)
1133
1134 #define IS_TGL_UY(dev_priv) \
1135 IS_SUBPLATFORM(dev_priv, INTEL_TIGERLAKE, INTEL_SUBPLATFORM_UY)
1136
1137 #define IS_SKL_GRAPHICS_STEP(p, since, until) (IS_SKYLAKE(p) && IS_GRAPHICS_STEP(p, since, until))
1138
1139 #define IS_KBL_GRAPHICS_STEP(dev_priv, since, until) \
1140 (IS_KABYLAKE(dev_priv) && IS_GRAPHICS_STEP(dev_priv, since, until))
1141 #define IS_KBL_DISPLAY_STEP(dev_priv, since, until) \
1142 (IS_KABYLAKE(dev_priv) && IS_DISPLAY_STEP(dev_priv, since, until))
1143
1144 #define IS_JSL_EHL_GRAPHICS_STEP(p, since, until) \
1145 (IS_JSL_EHL(p) && IS_GRAPHICS_STEP(p, since, until))
1146 #define IS_JSL_EHL_DISPLAY_STEP(p, since, until) \
1147 (IS_JSL_EHL(p) && IS_DISPLAY_STEP(p, since, until))
1148
1149 #define IS_TGL_DISPLAY_STEP(__i915, since, until) \
1150 (IS_TIGERLAKE(__i915) && \
1151 IS_DISPLAY_STEP(__i915, since, until))
1152
1153 #define IS_TGL_UY_GRAPHICS_STEP(__i915, since, until) \
1154 (IS_TGL_UY(__i915) && \
1155 IS_GRAPHICS_STEP(__i915, since, until))
1156
1157 #define IS_TGL_GRAPHICS_STEP(__i915, since, until) \
1158 	(IS_TIGERLAKE(__i915) && !IS_TGL_UY(__i915) && \
1159 IS_GRAPHICS_STEP(__i915, since, until))
1160
1161 #define IS_RKL_DISPLAY_STEP(p, since, until) \
1162 (IS_ROCKETLAKE(p) && IS_DISPLAY_STEP(p, since, until))
1163
1164 #define IS_DG1_GRAPHICS_STEP(p, since, until) \
1165 (IS_DG1(p) && IS_GRAPHICS_STEP(p, since, until))
1166 #define IS_DG1_DISPLAY_STEP(p, since, until) \
1167 (IS_DG1(p) && IS_DISPLAY_STEP(p, since, until))
1168
1169 #define IS_ADLS_DISPLAY_STEP(__i915, since, until) \
1170 (IS_ALDERLAKE_S(__i915) && \
1171 IS_DISPLAY_STEP(__i915, since, until))
1172
1173 #define IS_ADLS_GRAPHICS_STEP(__i915, since, until) \
1174 (IS_ALDERLAKE_S(__i915) && \
1175 IS_GRAPHICS_STEP(__i915, since, until))
1176
1177 #define IS_ADLP_DISPLAY_STEP(__i915, since, until) \
1178 (IS_ALDERLAKE_P(__i915) && \
1179 IS_DISPLAY_STEP(__i915, since, until))
1180
1181 #define IS_ADLP_GRAPHICS_STEP(__i915, since, until) \
1182 (IS_ALDERLAKE_P(__i915) && \
1183 IS_GRAPHICS_STEP(__i915, since, until))
1184
1185 #define IS_XEHPSDV_GRAPHICS_STEP(__i915, since, until) \
1186 (IS_XEHPSDV(__i915) && IS_GRAPHICS_STEP(__i915, since, until))
1187
1188 /*
1189 * DG2 hardware steppings are a bit unusual. The hardware design was forked to
1190 * create three variants (G10, G11, and G12) which each have distinct
1191 * workaround sets. The G11 and G12 forks of the DG2 design reset the GT
1192 * stepping back to "A0" for their first iterations, even though they're more
1193 * similar to a G10 B0 stepping and G10 C0 stepping respectively in terms of
1194 * functionality and workarounds. However the display stepping does not reset
1195 * in the same manner --- a specific stepping like "B0" has a consistent
1196 * meaning regardless of whether it belongs to a G10, G11, or G12 DG2.
1197 *
1198 * TLDR: All GT workarounds and stepping-specific logic must be applied in
1199 * relation to a specific subplatform (G10/G11/G12), whereas display workarounds
1200 * and stepping-specific logic will be applied with a general DG2-wide stepping
1201 * number.
1202 */
1203 #define IS_DG2_GRAPHICS_STEP(__i915, variant, since, until) \
1204 (IS_SUBPLATFORM(__i915, INTEL_DG2, INTEL_SUBPLATFORM_##variant) && \
1205 IS_GRAPHICS_STEP(__i915, since, until))
1206
1207 #define IS_DG2_DISPLAY_STEP(__i915, since, until) \
1208 (IS_DG2(__i915) && \
1209 IS_DISPLAY_STEP(__i915, since, until))
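
/*
 * Usage sketch (illustrative only; the two apply_*_workaround() helpers are
 * hypothetical): a GT workaround is therefore bounded per DG2 variant, while
 * a display workaround uses the DG2-wide stepping, e.g.:
 *
 *	if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_A0, STEP_B0) ||
 *	    IS_DG2_GRAPHICS_STEP(i915, G11, STEP_A0, STEP_B0))
 *		apply_gt_workaround(i915);
 *
 *	if (IS_DG2_DISPLAY_STEP(i915, STEP_B0, STEP_FOREVER))
 *		apply_display_workaround(i915);
 */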
1210
1211 #define IS_LP(dev_priv) (INTEL_INFO(dev_priv)->is_lp)
1212 #define IS_GEN9_LP(dev_priv) (GRAPHICS_VER(dev_priv) == 9 && IS_LP(dev_priv))
1213 #define IS_GEN9_BC(dev_priv) (GRAPHICS_VER(dev_priv) == 9 && !IS_LP(dev_priv))
1214
1215 #define __HAS_ENGINE(engine_mask, id) ((engine_mask) & BIT(id))
1216 #define HAS_ENGINE(gt, id) __HAS_ENGINE((gt)->info.engine_mask, id)
1217
1218 #define ENGINE_INSTANCES_MASK(gt, first, count) ({ \
1219 unsigned int first__ = (first); \
1220 unsigned int count__ = (count); \
1221 ((gt)->info.engine_mask & \
1222 GENMASK(first__ + count__ - 1, first__)) >> first__; \
1223 })
1224 #define RCS_MASK(gt) \
1225 ENGINE_INSTANCES_MASK(gt, RCS0, I915_MAX_RCS)
1226 #define VDBOX_MASK(gt) \
1227 ENGINE_INSTANCES_MASK(gt, VCS0, I915_MAX_VCS)
1228 #define VEBOX_MASK(gt) \
1229 ENGINE_INSTANCES_MASK(gt, VECS0, I915_MAX_VECS)
1230 #define CCS_MASK(gt) \
1231 ENGINE_INSTANCES_MASK(gt, CCS0, I915_MAX_CCS)
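
/*
 * Usage sketch (illustrative only): the per-class masks answer "does this GT
 * have any engine of that class at all"; HAS_PXP() further down uses
 * VDBOX_MASK() the same way, e.g.:
 *
 *	if (!VDBOX_MASK(gt))
 *		return -ENODEV;
 */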
1232
1233 /*
1234  * The Gen7 cmdparser copies the scanned buffer to the ggtt for execution.
1235  * All later gens can run the final buffer from the ppgtt.
1236 */
1237 #define CMDPARSER_USES_GGTT(dev_priv) (GRAPHICS_VER(dev_priv) == 7)
1238
1239 #define HAS_LLC(dev_priv) (INTEL_INFO(dev_priv)->has_llc)
1240 #define HAS_4TILE(dev_priv) (INTEL_INFO(dev_priv)->has_4tile)
1241 #define HAS_SNOOP(dev_priv) (INTEL_INFO(dev_priv)->has_snoop)
1242 #define HAS_EDRAM(dev_priv) ((dev_priv)->edram_size_mb)
1243 #define HAS_SECURE_BATCHES(dev_priv) (GRAPHICS_VER(dev_priv) < 6)
1244 #define HAS_WT(dev_priv) HAS_EDRAM(dev_priv)
1245
1246 #define HWS_NEEDS_PHYSICAL(dev_priv) (INTEL_INFO(dev_priv)->hws_needs_physical)
1247
1248 #define HAS_LOGICAL_RING_CONTEXTS(dev_priv) \
1249 (INTEL_INFO(dev_priv)->has_logical_ring_contexts)
1250 #define HAS_LOGICAL_RING_ELSQ(dev_priv) \
1251 (INTEL_INFO(dev_priv)->has_logical_ring_elsq)
1252
1253 #define HAS_EXECLISTS(dev_priv) HAS_LOGICAL_RING_CONTEXTS(dev_priv)
1254
1255 #define INTEL_PPGTT(dev_priv) (INTEL_INFO(dev_priv)->ppgtt_type)
1256 #define HAS_PPGTT(dev_priv) \
1257 (INTEL_PPGTT(dev_priv) != INTEL_PPGTT_NONE)
1258 #define HAS_FULL_PPGTT(dev_priv) \
1259 (INTEL_PPGTT(dev_priv) >= INTEL_PPGTT_FULL)
1260
1261 #define HAS_PAGE_SIZES(dev_priv, sizes) ({ \
1262 GEM_BUG_ON((sizes) == 0); \
1263 ((sizes) & ~INTEL_INFO(dev_priv)->page_sizes) == 0; \
1264 })
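
/*
 * Usage sketch (illustrative only; "use_huge_pages" is a hypothetical local):
 * ask whether the device supports a given set of GTT page sizes, e.g.:
 *
 *	if (HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_2M))
 *		use_huge_pages = true;
 */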
1265
1266 #define HAS_OVERLAY(dev_priv) (INTEL_INFO(dev_priv)->display.has_overlay)
1267 #define OVERLAY_NEEDS_PHYSICAL(dev_priv) \
1268 (INTEL_INFO(dev_priv)->display.overlay_needs_physical)
1269
1270 /* Early gen2 have a totally busted CS tlb and require pinned batches. */
1271 #define HAS_BROKEN_CS_TLB(dev_priv) (IS_I830(dev_priv) || IS_I845G(dev_priv))
1272
1273 #define NEEDS_RC6_CTX_CORRUPTION_WA(dev_priv) \
1274 (IS_BROADWELL(dev_priv) || GRAPHICS_VER(dev_priv) == 9)
1275
1276 /* WaRsDisableCoarsePowerGating:skl,cnl */
1277 #define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \
1278 (IS_SKL_GT3(dev_priv) || IS_SKL_GT4(dev_priv))
1279
1280 #define HAS_GMBUS_IRQ(dev_priv) (DISPLAY_VER(dev_priv) >= 4)
1281 #define HAS_GMBUS_BURST_READ(dev_priv) (DISPLAY_VER(dev_priv) >= 11 || \
1282 IS_GEMINILAKE(dev_priv) || \
1283 IS_KABYLAKE(dev_priv))
1284
1285 /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
1286 * rows, which changed the alignment requirements and fence programming.
1287 */
1288 #define HAS_128_BYTE_Y_TILING(dev_priv) (GRAPHICS_VER(dev_priv) != 2 && \
1289 !(IS_I915G(dev_priv) || IS_I915GM(dev_priv)))
1290 #define SUPPORTS_TV(dev_priv) (INTEL_INFO(dev_priv)->display.supports_tv)
1291 #define I915_HAS_HOTPLUG(dev_priv) (INTEL_INFO(dev_priv)->display.has_hotplug)
1292
1293 #define HAS_FW_BLC(dev_priv) (DISPLAY_VER(dev_priv) > 2)
1294 #define HAS_FBC(dev_priv) (INTEL_INFO(dev_priv)->display.fbc_mask != 0)
1295 #define HAS_CUR_FBC(dev_priv) (!HAS_GMCH(dev_priv) && DISPLAY_VER(dev_priv) >= 7)
1296
1297 #define HAS_IPS(dev_priv) (IS_HSW_ULT(dev_priv) || IS_BROADWELL(dev_priv))
1298
1299 #define HAS_DP_MST(dev_priv) (INTEL_INFO(dev_priv)->display.has_dp_mst)
1300 #define HAS_DP20(dev_priv) (IS_DG2(dev_priv))
1301
1302 #define HAS_CDCLK_CRAWL(dev_priv) (INTEL_INFO(dev_priv)->display.has_cdclk_crawl)
1303 #define HAS_DDI(dev_priv) (INTEL_INFO(dev_priv)->display.has_ddi)
1304 #define HAS_FPGA_DBG_UNCLAIMED(dev_priv) (INTEL_INFO(dev_priv)->display.has_fpga_dbg)
1305 #define HAS_PSR(dev_priv) (INTEL_INFO(dev_priv)->display.has_psr)
1306 #define HAS_PSR_HW_TRACKING(dev_priv) \
1307 (INTEL_INFO(dev_priv)->display.has_psr_hw_tracking)
1308 #define HAS_PSR2_SEL_FETCH(dev_priv) (DISPLAY_VER(dev_priv) >= 12)
1309 #define HAS_TRANSCODER(dev_priv, trans) ((INTEL_INFO(dev_priv)->display.cpu_transcoder_mask & BIT(trans)) != 0)
1310
1311 #define HAS_RC6(dev_priv) (INTEL_INFO(dev_priv)->has_rc6)
1312 #define HAS_RC6p(dev_priv) (INTEL_INFO(dev_priv)->has_rc6p)
1313 #define HAS_RC6pp(dev_priv) (false) /* HW was never validated */
1314
1315 #define HAS_RPS(dev_priv) (INTEL_INFO(dev_priv)->has_rps)
1316
1317 #define HAS_DMC(dev_priv) (INTEL_INFO(dev_priv)->display.has_dmc)
1318
1319 #define HAS_HECI_PXP(dev_priv) \
1320 (INTEL_INFO(dev_priv)->has_heci_pxp)
1321
1322 #define HAS_HECI_GSCFI(dev_priv) \
1323 (INTEL_INFO(dev_priv)->has_heci_gscfi)
1324
1325 #define HAS_HECI_GSC(dev_priv) (HAS_HECI_PXP(dev_priv) || HAS_HECI_GSCFI(dev_priv))
1326
1327 #define HAS_MSO(i915) (DISPLAY_VER(i915) >= 12)
1328
1329 #define HAS_RUNTIME_PM(dev_priv) (INTEL_INFO(dev_priv)->has_runtime_pm)
1330 #define HAS_64BIT_RELOC(dev_priv) (INTEL_INFO(dev_priv)->has_64bit_reloc)
1331
1332 #define HAS_MSLICES(dev_priv) \
1333 (INTEL_INFO(dev_priv)->has_mslices)
1334
1335 /*
1336  * Set this flag when the platform requires 64K GTT page sizes or larger for
1337 * device local memory access.
1338 */
1339 #define HAS_64K_PAGES(dev_priv) (INTEL_INFO(dev_priv)->has_64k_pages)
1340
1341 /*
1342  * Set this flag when the platform doesn't allow both 64K pages and 4K pages
1343  * in the same PT. This flag means we need to support a compact PT layout for
1344  * the ppGTT when using 64K GTT pages.
1345 */
1346 #define NEEDS_COMPACT_PT(dev_priv) (INTEL_INFO(dev_priv)->needs_compact_pt)
1347
1348 #define HAS_IPC(dev_priv) (INTEL_INFO(dev_priv)->display.has_ipc)
1349
1350 #define HAS_REGION(i915, i) (INTEL_INFO(i915)->memory_regions & (i))
1351 #define HAS_LMEM(i915) HAS_REGION(i915, REGION_LMEM)
1352
1353 /*
1354  * Platform has dedicated compression control state for each lmem surface,
1355 * stored in lmem to support the 3D and media compression formats.
1356 */
1357 #define HAS_FLAT_CCS(dev_priv) (INTEL_INFO(dev_priv)->has_flat_ccs)
1358
1359 #define HAS_GT_UC(dev_priv) (INTEL_INFO(dev_priv)->has_gt_uc)
1360
1361 #define HAS_POOLED_EU(dev_priv) (INTEL_INFO(dev_priv)->has_pooled_eu)
1362
1363 #define HAS_GLOBAL_MOCS_REGISTERS(dev_priv) (INTEL_INFO(dev_priv)->has_global_mocs)
1364
1365 #define HAS_PXP(dev_priv) ((IS_ENABLED(CONFIG_DRM_I915_PXP) && \
1366 INTEL_INFO(dev_priv)->has_pxp) && \
1367 VDBOX_MASK(to_gt(dev_priv)))
1368
1369 #define HAS_GMCH(dev_priv) (INTEL_INFO(dev_priv)->display.has_gmch)
1370
1371 #define HAS_LSPCON(dev_priv) (IS_DISPLAY_VER(dev_priv, 9, 10))
1372
1373 /* DPF == dynamic parity feature */
1374 #define HAS_L3_DPF(dev_priv) (INTEL_INFO(dev_priv)->has_l3_dpf)
1375 #define NUM_L3_SLICES(dev_priv) (IS_HSW_GT3(dev_priv) ? \
1376 2 : HAS_L3_DPF(dev_priv))
1377
1378 #define GT_FREQUENCY_MULTIPLIER 50
1379 #define GEN9_FREQ_SCALER 3
1380
1381 #define INTEL_NUM_PIPES(dev_priv) (hweight8(INTEL_INFO(dev_priv)->display.pipe_mask))
1382
1383 #define HAS_DISPLAY(dev_priv) (INTEL_INFO(dev_priv)->display.pipe_mask != 0)
1384
1385 #define HAS_VRR(i915) (DISPLAY_VER(i915) >= 11)
1386
1387 #define HAS_ASYNC_FLIPS(i915) (DISPLAY_VER(i915) >= 5)
1388
1389 /* Only valid when HAS_DISPLAY() is true */
1390 #define INTEL_DISPLAY_ENABLED(dev_priv) \
1391 (drm_WARN_ON(&(dev_priv)->drm, !HAS_DISPLAY(dev_priv)), !(dev_priv)->params.disable_display)
1392
1393 #define HAS_GUC_DEPRIVILEGE(dev_priv) \
1394 (INTEL_INFO(dev_priv)->has_guc_deprivilege)
1395
1396 #define HAS_PERCTX_PREEMPT_CTRL(i915) \
1397 ((GRAPHICS_VER(i915) >= 9) && GRAPHICS_VER_FULL(i915) < IP_VER(12, 55))
1398
1399 #define HAS_D12_PLANE_MINIMIZATION(dev_priv) (IS_ROCKETLAKE(dev_priv) || \
1400 IS_ALDERLAKE_S(dev_priv))
1401
1402 #define HAS_MBUS_JOINING(i915) (IS_ALDERLAKE_P(i915))
1403
1404 /* i915_gem.c */
1405 void i915_gem_init_early(struct drm_i915_private *dev_priv);
1406 void i915_gem_cleanup_early(struct drm_i915_private *dev_priv);
1407
1408 static inline void i915_gem_drain_freed_objects(struct drm_i915_private *i915)
1409 {
1410 /*
1411 	 * A single pass should suffice to release all the freed objects (along
1412 	 * most call paths), but be a little more paranoid in that freeing
1413 	 * the objects does take a small amount of time, during which the rcu
1414 	 * callbacks could have added new objects into the freed list, and
1415 	 * armed the work again.
1416 */
1417 while (atomic_read(&i915->mm.free_count)) {
1418 flush_work(&i915->mm.free_work);
1419 flush_delayed_work(&i915->bdev.wq);
1420 rcu_barrier();
1421 }
1422 }
1423
1424 static inline void i915_gem_drain_workqueue(struct drm_i915_private *i915)
1425 {
1426 /*
1427 	 * Similar to the objects above (see i915_gem_drain_freed_objects()), in
1428 	 * general we have workers that are armed by RCU and then rearm
1429 	 * themselves in their callbacks. To be paranoid, we need to
1430 	 * drain the workqueue a second time after waiting for the RCU
1431 	 * grace period so that we catch work queued via RCU from the first
1432 	 * pass. As neither drain_workqueue() nor flush_workqueue() report
1433 	 * a result, we assume that we don't require more
1434 	 * than 3 passes to catch all _recursive_ RCU delayed work.
1435 *
1436 */
1437 int pass = 3;
1438 do {
1439 flush_workqueue(i915->wq);
1440 rcu_barrier();
1441 i915_gem_drain_freed_objects(i915);
1442 } while (--pass);
1443 drain_workqueue(i915->wq);
1444 }
1445
1446 struct i915_vma * __must_check
1447 i915_gem_object_ggtt_pin_ww(struct drm_i915_gem_object *obj,
1448 struct i915_gem_ww_ctx *ww,
1449 const struct i915_ggtt_view *view,
1450 u64 size, u64 alignment, u64 flags);
1451
1452 struct i915_vma * __must_check
1453 i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
1454 const struct i915_ggtt_view *view,
1455 u64 size, u64 alignment, u64 flags);
1456
1457 int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
1458 unsigned long flags);
1459 #define I915_GEM_OBJECT_UNBIND_ACTIVE BIT(0)
1460 #define I915_GEM_OBJECT_UNBIND_BARRIER BIT(1)
1461 #define I915_GEM_OBJECT_UNBIND_TEST BIT(2)
1462 #define I915_GEM_OBJECT_UNBIND_VM_TRYLOCK BIT(3)
1463 #define I915_GEM_OBJECT_UNBIND_ASYNC BIT(4)
1464
1465 void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv);
1466
1467 int __must_check i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno);
1468
1469 int __must_check i915_gem_init(struct drm_i915_private *dev_priv);
1470 void i915_gem_driver_register(struct drm_i915_private *i915);
1471 void i915_gem_driver_unregister(struct drm_i915_private *i915);
1472 void i915_gem_driver_remove(struct drm_i915_private *dev_priv);
1473 void i915_gem_driver_release(struct drm_i915_private *dev_priv);
1474
1475 int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file);
1476
1477 /* intel_device_info.c */
1478 static inline struct intel_device_info *
1479 mkwrite_device_info(struct drm_i915_private *dev_priv)
1480 {
1481 return (struct intel_device_info *)INTEL_INFO(dev_priv);
1482 }
1483
1484 static inline enum i915_map_type
1485 i915_coherent_map_type(struct drm_i915_private *i915,
1486 struct drm_i915_gem_object *obj, bool always_coherent)
1487 {
1488 if (i915_gem_object_is_lmem(obj))
1489 return I915_MAP_WC;
1490 if (HAS_LLC(i915) || always_coherent)
1491 return I915_MAP_WB;
1492 else
1493 return I915_MAP_WC;
1494 }
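
/*
 * Usage sketch (illustrative only): pick a mapping type coherent with the CPU
 * for a given object before pinning a kernel vmap, e.g.:
 *
 *	type = i915_coherent_map_type(i915, obj, true);
 *	vaddr = i915_gem_object_pin_map_unlocked(obj, type);
 */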
1495
1496 #endif
1497