Lines matching refs: gvt

This listing is an identifier cross-reference for gvt (struct intel_gvt, the per-device GVT-g state) across the i915 GVT MMIO handling code, apparently drivers/gpu/drm/i915/gvt/handlers.c in the Linux kernel. Each entry shows the source line number, the matching line, and the enclosing function; "argument" and "local" mark whether gvt is bound as a function parameter or a local variable at that reference. Short annotated sketches of the recurring patterns are interleaved below.

63 unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt)  in intel_gvt_get_device_type()  argument
65 struct drm_i915_private *i915 = gvt->gt->i915; in intel_gvt_get_device_type()
81 static bool intel_gvt_match_device(struct intel_gvt *gvt, in intel_gvt_match_device() argument
84 return intel_gvt_get_device_type(gvt) & device; in intel_gvt_match_device()
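
A pattern that recurs throughout this file: intel_gvt_get_device_type() condenses the platform into a bitmask, and intel_gvt_match_device() tests a handler's device mask against it, so each register handler can be registered for only a subset of platforms. A minimal sketch of the technique; the D_* names appear in the GVT source, but the values here are illustrative:

	#include <linux/types.h>

	#define D_BDW		(1 << 0)	/* illustrative values */
	#define D_SKL		(1 << 1)
	#define D_BXT		(1 << 2)
	#define D_SKL_PLUS	(D_SKL | D_BXT)

	static unsigned long device_type;	/* derived once from the platform */

	/* A handler applies when its device mask shares a bit with the platform. */
	static bool match_device(unsigned long handler_mask)
	{
		return device_type & handler_mask;
	}
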
99 struct intel_gvt_mmio_info *intel_gvt_find_mmio_info(struct intel_gvt *gvt, in intel_gvt_find_mmio_info() argument
104 hash_for_each_possible(gvt->mmio.mmio_info_table, e, node, offset) { in intel_gvt_find_mmio_info()
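
intel_gvt_find_mmio_info() resolves a register offset to its tracking entry through the kernel's bucketed hashtable: hash_for_each_possible() walks only the bucket the key hashes to, so the offset still has to be compared explicitly inside the loop. A kernel-style sketch with simplified names and an illustrative bucket count:

	#include <linux/hashtable.h>

	struct mmio_info {
		u32 offset;
		struct hlist_node node;
	};

	#define MMIO_HASH_BITS 9		/* illustrative; 2^9 buckets */
	static DECLARE_HASHTABLE(mmio_info_table, MMIO_HASH_BITS);

	static struct mmio_info *find_mmio_info(u32 offset)
	{
		struct mmio_info *e;

		/* Only the bucket that "offset" hashes to is walked. */
		hash_for_each_possible(mmio_info_table, e, node, offset)
			if (e->offset == offset)
				return e;
		return NULL;
	}
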
111 static int setup_mmio_info(struct intel_gvt *gvt, u32 offset, u32 size, in setup_mmio_info() argument
118 if (!intel_gvt_match_device(gvt, device)) in setup_mmio_info()
128 p = intel_gvt_find_mmio_info(gvt, i); in setup_mmio_info()
135 gvt->mmio.mmio_attribute[i / 4] = flags; in setup_mmio_info()
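
Line 135 shows the attribute store: one byte of flags per 32-bit register, indexed by offset / 4 (equivalently offset >> 2, as at line 3220). A sketch of that layout; F_MODE_MASK and F_PM_SAVE are real flag names from this code, but the bit values below are assumed for illustration:

	#include <linux/types.h>

	#define F_MODE_MASK	(1 << 0)	/* register uses masked writes */
	#define F_PM_SAVE	(1 << 1)	/* restore after power transitions */

	static u8 *mmio_attribute;		/* vzalloc(mmio_size / 4) at setup */

	static void set_mmio_attr(u32 offset, u8 flags)
	{
		mmio_attribute[offset / 4] = flags;
	}

	static bool mmio_has_mode_mask(u32 offset)
	{
		return mmio_attribute[offset / 4] & F_MODE_MASK;
	}
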
153 intel_gvt_render_mmio_to_engine(struct intel_gvt *gvt, unsigned int offset) in intel_gvt_render_mmio_to_engine() argument
159 for_each_engine(engine, gvt->gt, id) in intel_gvt_render_mmio_to_engine()
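
intel_gvt_render_mmio_to_engine() (line 153) maps a raw offset to the engine that owns it: each engine's registers live in a 4KiB page at engine->mmio_base, so masking off the low 12 bits identifies the owner. A sketch consistent with the loop at line 159 (types as in the i915 tree):

	static const struct intel_engine_cs *
	offset_to_engine(struct intel_gvt *gvt, unsigned int offset)
	{
		struct intel_engine_cs *engine;
		enum intel_engine_id id;

		offset &= ~GENMASK(11, 0);	/* round down to the 4K register page */
		for_each_engine(engine, gvt->gt, id)
			if (engine->mmio_base == offset)
				return engine;
		return NULL;			/* not a per-engine register */
	}
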
220 if (GRAPHICS_VER(vgpu->gvt->gt->i915) <= 10) { in gamw_echo_dev_rw_ia_write()
256 struct intel_gvt *gvt = vgpu->gvt; in fence_mmio_write() local
265 mmio_hw_access_pre(gvt->gt); in fence_mmio_write()
268 mmio_hw_access_post(gvt->gt); in fence_mmio_write()
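
fence_mmio_write() (lines 256-268) brackets its physical register update between mmio_hw_access_pre() and mmio_hw_access_post(), this file's helpers for waking the GT before touching hardware and releasing it afterwards. The shape of that bracket, sketched with the file's own helpers:

	static void update_hw_reg(struct intel_gt *gt, u32 offset, u32 value)
	{
		mmio_hw_access_pre(gt);		/* wake the device */
		intel_uncore_write(gt->uncore, _MMIO(offset), value);
		mmio_hw_access_post(gt);	/* drop the reference */
	}
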
286 if (GRAPHICS_VER(vgpu->gvt->gt->i915) >= 9) { in mul_force_wake_write()
348 engine_mask &= vgpu->gvt->gt->info.engine_mask; in gdrst_mmio_write()
507 refclk = vgpu->gvt->gt->i915->display.dpll.ref_clks.ssc; in bdw_vgpu_get_dp_bitrate()
538 int refclk = vgpu->gvt->gt->i915->display.dpll.ref_clks.nssc; in bxt_vgpu_get_dp_bitrate()
649 struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915; in vgpu_update_refresh_rate()
775 intel_gvt_render_mmio_to_engine(vgpu->gvt, offset); in force_nonpriv_write()
1011 struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915; in pri_surf_mmio_write()
1052 struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915; in reg50080_mmio_write()
1076 struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915; in trigger_aux_channel_interrupt()
1179 if ((GRAPHICS_VER(vgpu->gvt->gt->i915) >= 9) in dp_aux_ch_ctl_mmio_write()
1183 } else if (IS_BROADWELL(vgpu->gvt->gt->i915) && in dp_aux_ch_ctl_mmio_write()
1499 struct kobject *kobj = &vgpu->gvt->gt->i915->drm.primary->kdev->kobj; in send_display_ready_uevent()
1560 struct drm_i915_private *i915 = vgpu->gvt->gt->i915; in pf_write()
1616 struct drm_i915_private *i915 = vgpu->gvt->gt->i915; in dma_ctrl_write()
1635 struct drm_i915_private *i915 = vgpu->gvt->gt->i915; in gen9_trtte_write()
1687 if (IS_SKYLAKE(vgpu->gvt->gt->i915) || in mailbox_write()
1688 IS_KABYLAKE(vgpu->gvt->gt->i915) || in mailbox_write()
1689 IS_COFFEELAKE(vgpu->gvt->gt->i915) || in mailbox_write()
1690 IS_COMETLAKE(vgpu->gvt->gt->i915)) { in mailbox_write()
1700 } else if (IS_BROXTON(vgpu->gvt->gt->i915)) { in mailbox_write()
1713 if (IS_SKYLAKE(vgpu->gvt->gt->i915) || in mailbox_write()
1714 IS_KABYLAKE(vgpu->gvt->gt->i915) || in mailbox_write()
1715 IS_COFFEELAKE(vgpu->gvt->gt->i915) || in mailbox_write()
1716 IS_COMETLAKE(vgpu->gvt->gt->i915)) in mailbox_write()
1741 intel_gvt_render_mmio_to_engine(vgpu->gvt, offset); in hws_pga_write()
1772 if (IS_BROXTON(vgpu->gvt->gt->i915)) in skl_power_well_ctl_write()
1946 struct intel_gvt *gvt = vgpu->gvt; in mmio_read_from_hw() local
1948 intel_gvt_render_mmio_to_engine(gvt, offset); in mmio_read_from_hw()
1958 vgpu == gvt->scheduler.engine_owner[engine->id] || in mmio_read_from_hw()
1961 mmio_hw_access_pre(gvt->gt); in mmio_read_from_hw()
1963 intel_uncore_read(gvt->gt->uncore, _MMIO(offset)); in mmio_read_from_hw()
1964 mmio_hw_access_post(gvt->gt); in mmio_read_from_hw()
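
mmio_read_from_hw() (lines 1946-1964) is the pass-through read path: when this vGPU currently owns the engine the register belongs to (or the register is not engine-specific), the live hardware value is latched into the vGPU's virtual register before the normal virtual read completes. A simplified sketch; the kernel version also passes through ring timestamp registers unconditionally:

	static u32 read_reg(struct intel_vgpu *vgpu, u32 offset)
	{
		struct intel_gvt *gvt = vgpu->gvt;
		const struct intel_engine_cs *engine =
			intel_gvt_render_mmio_to_engine(gvt, offset);

		if (!engine || vgpu == gvt->scheduler.engine_owner[engine->id]) {
			mmio_hw_access_pre(gvt->gt);
			vgpu_vreg(vgpu, offset) =
				intel_uncore_read(gvt->gt->uncore, _MMIO(offset));
			mmio_hw_access_post(gvt->gt);
		}
		return vgpu_vreg(vgpu, offset);
	}
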
1973 struct drm_i915_private *i915 = vgpu->gvt->gt->i915; in elsp_mmio_write()
1974 const struct intel_engine_cs *engine = intel_gvt_render_mmio_to_engine(vgpu->gvt, offset); in elsp_mmio_write()
2017 intel_gvt_render_mmio_to_engine(vgpu->gvt, offset); in ring_mode_mmio_write()
2022 if (IS_COFFEELAKE(vgpu->gvt->gt->i915) || in ring_mode_mmio_write()
2023 IS_COMETLAKE(vgpu->gvt->gt->i915)) in ring_mode_mmio_write()
2032 if ((IS_COFFEELAKE(vgpu->gvt->gt->i915) || in ring_mode_mmio_write()
2033 IS_COMETLAKE(vgpu->gvt->gt->i915)) && in ring_mode_mmio_write()
2137 ret = setup_mmio_info(gvt, i915_mmio_reg_offset(reg), \
2163 if (HAS_ENGINE(gvt->gt, VCS1)) \
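
Lines 2137 and 2163 are fragments of the table-driven registration macros used by the init_*_mmio_info() functions: each MMIO_F()-style entry expands to a setup_mmio_info() call that returns from the init function on failure, and the per-ring variant guards the second BSD ring with HAS_ENGINE(). A reconstruction of the shape; parameter names are abbreviated and details may differ from the kernel's exact macro:

	#define MMIO_F(reg, s, f, am, rm, d, r, w) do { \
		ret = setup_mmio_info(gvt, i915_mmio_reg_offset(reg), \
				      s, f, am, rm, d, r, w); \
		if (ret) \
			return ret; \
	} while (0)
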
2179 static int init_generic_mmio_info(struct intel_gvt *gvt) in init_generic_mmio_info() argument
2181 struct drm_i915_private *dev_priv = gvt->gt->i915; in init_generic_mmio_info()
2442 static int init_bdw_mmio_info(struct intel_gvt *gvt) in init_bdw_mmio_info() argument
2577 static int init_skl_mmio_info(struct intel_gvt *gvt) in init_skl_mmio_info() argument
2579 struct drm_i915_private *dev_priv = gvt->gt->i915; in init_skl_mmio_info()
2749 static int init_bxt_mmio_info(struct intel_gvt *gvt) in init_bxt_mmio_info() argument
2797 static struct gvt_mmio_block *find_mmio_block(struct intel_gvt *gvt, in find_mmio_block() argument
2800 struct gvt_mmio_block *block = gvt->mmio.mmio_block; in find_mmio_block()
2801 int num = gvt->mmio.num_mmio_block; in find_mmio_block()
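
find_mmio_block() (lines 2797-2801) uses a plain linear scan: MMIO blocks describe whole register ranges (such as the PVINFO page looked up at line 2927) and there are few of them, so lookup simply tests the offset against each [start, start + size). Sketched with plain offsets instead of i915_reg_t:

	#include <linux/types.h>

	struct mmio_block {
		u32 start;
		u32 size;
	};

	static struct mmio_block *find_block(struct mmio_block *blocks,
					     int num, u32 offset)
	{
		int i;

		for (i = 0; i < num; i++)
			if (offset >= blocks[i].start &&
			    offset < blocks[i].start + blocks[i].size)
				return &blocks[i];
		return NULL;
	}
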
2820 void intel_gvt_clean_mmio_info(struct intel_gvt *gvt) in intel_gvt_clean_mmio_info() argument
2826 hash_for_each_safe(gvt->mmio.mmio_info_table, i, tmp, e, node) in intel_gvt_clean_mmio_info()
2829 kfree(gvt->mmio.mmio_block); in intel_gvt_clean_mmio_info()
2830 gvt->mmio.mmio_block = NULL; in intel_gvt_clean_mmio_info()
2831 gvt->mmio.num_mmio_block = 0; in intel_gvt_clean_mmio_info()
2833 vfree(gvt->mmio.mmio_attribute); in intel_gvt_clean_mmio_info()
2834 gvt->mmio.mmio_attribute = NULL; in intel_gvt_clean_mmio_info()
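
intel_gvt_clean_mmio_info() (lines 2820-2834) is the matching teardown: hash_for_each_safe() permits freeing entries while iterating, and each allocation is released by the allocator that produced it, kfree() for the krealloc'd block array and vfree() for the vzalloc'd attribute array, with pointers nulled to guard against double-free. A close reconstruction from the lines above:

	static void clean_mmio_info(struct intel_gvt *gvt)
	{
		struct intel_gvt_mmio_info *e;
		struct hlist_node *tmp;
		int i;

		hash_for_each_safe(gvt->mmio.mmio_info_table, i, tmp, e, node)
			kfree(e);

		kfree(gvt->mmio.mmio_block);
		gvt->mmio.mmio_block = NULL;
		gvt->mmio.num_mmio_block = 0;

		vfree(gvt->mmio.mmio_attribute);
		gvt->mmio.mmio_attribute = NULL;
	}
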
2840 struct intel_gvt *gvt = iter->data; in handle_mmio() local
2851 p = intel_gvt_find_mmio_info(gvt, i); in handle_mmio()
2870 hash_add(gvt->mmio.mmio_info_table, &info->node, info->offset); in handle_mmio()
2871 gvt->mmio.num_tracked_mmio++; in handle_mmio()
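
handle_mmio() (lines 2840-2871) is the registration side of the lookup sketched earlier: it consults intel_gvt_find_mmio_info() first (line 2851), then keys a freshly allocated entry into the table by offset and bumps the tracked count. A guarded-insert sketch, assuming duplicates are simply skipped (the kernel may instead warn on them):

	#include <linux/slab.h>

	static int track_mmio(struct intel_gvt *gvt, u32 offset)
	{
		struct intel_gvt_mmio_info *info;

		if (intel_gvt_find_mmio_info(gvt, offset))
			return 0;	/* already tracked */

		info = kzalloc(sizeof(*info), GFP_KERNEL);
		if (!info)
			return -ENOMEM;

		info->offset = offset;
		hash_add(gvt->mmio.mmio_info_table, &info->node, info->offset);
		gvt->mmio.num_tracked_mmio++;
		return 0;
	}
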
2879 struct intel_gvt *gvt = iter->data; in handle_mmio_block() local
2880 struct gvt_mmio_block *block = gvt->mmio.mmio_block; in handle_mmio_block()
2884 (gvt->mmio.num_mmio_block + 1) * sizeof(*block), in handle_mmio_block()
2889 gvt->mmio.mmio_block = block = ret; in handle_mmio_block()
2891 block += gvt->mmio.num_mmio_block; in handle_mmio_block()
2898 gvt->mmio.num_mmio_block++; in handle_mmio_block()
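
handle_mmio_block() (lines 2879-2898) grows the block array by one slot per call. Since krealloc() may either resize in place or move the array, the base pointer is re-assigned before the new tail slot is filled. A sketch of that grow-by-one pattern, with handler fields omitted:

	#include <linux/slab.h>

	static int add_block(struct intel_gvt *gvt, i915_reg_t offset, u32 size)
	{
		struct gvt_mmio_block *block;

		block = krealloc(gvt->mmio.mmio_block,
				 (gvt->mmio.num_mmio_block + 1) * sizeof(*block),
				 GFP_KERNEL);
		if (!block)
			return -ENOMEM;

		gvt->mmio.mmio_block = block;
		block += gvt->mmio.num_mmio_block;	/* point at the new tail slot */
		block->offset = offset;
		block->size = size;
		gvt->mmio.num_mmio_block++;
		return 0;
	}
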
2912 static int init_mmio_info(struct intel_gvt *gvt) in init_mmio_info() argument
2915 .i915 = gvt->gt->i915, in init_mmio_info()
2916 .data = gvt, in init_mmio_info()
2923 static int init_mmio_block_handlers(struct intel_gvt *gvt) in init_mmio_block_handlers() argument
2927 block = find_mmio_block(gvt, VGT_PVINFO_PAGE); in init_mmio_block_handlers()
2930 i915_mmio_reg_offset(gvt->mmio.mmio_block->offset)); in init_mmio_block_handlers()
2950 int intel_gvt_setup_mmio_info(struct intel_gvt *gvt) in intel_gvt_setup_mmio_info() argument
2952 struct intel_gvt_device_info *info = &gvt->device_info; in intel_gvt_setup_mmio_info()
2953 struct drm_i915_private *i915 = gvt->gt->i915; in intel_gvt_setup_mmio_info()
2954 int size = info->mmio_size / 4 * sizeof(*gvt->mmio.mmio_attribute); in intel_gvt_setup_mmio_info()
2957 gvt->mmio.mmio_attribute = vzalloc(size); in intel_gvt_setup_mmio_info()
2958 if (!gvt->mmio.mmio_attribute) in intel_gvt_setup_mmio_info()
2961 ret = init_mmio_info(gvt); in intel_gvt_setup_mmio_info()
2965 ret = init_mmio_block_handlers(gvt); in intel_gvt_setup_mmio_info()
2969 ret = init_generic_mmio_info(gvt); in intel_gvt_setup_mmio_info()
2974 ret = init_bdw_mmio_info(gvt); in intel_gvt_setup_mmio_info()
2981 ret = init_bdw_mmio_info(gvt); in intel_gvt_setup_mmio_info()
2984 ret = init_skl_mmio_info(gvt); in intel_gvt_setup_mmio_info()
2988 ret = init_bdw_mmio_info(gvt); in intel_gvt_setup_mmio_info()
2991 ret = init_skl_mmio_info(gvt); in intel_gvt_setup_mmio_info()
2994 ret = init_bxt_mmio_info(gvt); in intel_gvt_setup_mmio_info()
3001 intel_gvt_clean_mmio_info(gvt); in intel_gvt_setup_mmio_info()
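
The dispatch visible at lines 2969-3001 is layered: later platforms reuse earlier register tables, so SKL-class parts run the BDW init plus their own, and Broxton stacks BDW, SKL and BXT in sequence, with every failure funnelling to one cleanup label. The platform macros are the kernel's; the control flow below is a simplified reconstruction:

	static int setup_platform_mmio_info(struct intel_gvt *gvt)
	{
		struct drm_i915_private *i915 = gvt->gt->i915;
		int ret;

		ret = init_generic_mmio_info(gvt);
		if (ret)
			goto err;

		if (IS_BROADWELL(i915)) {
			ret = init_bdw_mmio_info(gvt);
		} else if (IS_SKYLAKE(i915) || IS_KABYLAKE(i915) ||
			   IS_COFFEELAKE(i915) || IS_COMETLAKE(i915)) {
			/* SKL-class platforms stack on top of the BDW tables. */
			ret = init_bdw_mmio_info(gvt);
			if (!ret)
				ret = init_skl_mmio_info(gvt);
		} else if (IS_BROXTON(i915)) {
			/* BXT stacks on BDW + SKL, then adds its own. */
			ret = init_bdw_mmio_info(gvt);
			if (!ret)
				ret = init_skl_mmio_info(gvt);
			if (!ret)
				ret = init_bxt_mmio_info(gvt);
		}
		if (ret)
			goto err;

		return 0;
	err:
		intel_gvt_clean_mmio_info(gvt);
		return ret;
	}
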
3014 int intel_gvt_for_each_tracked_mmio(struct intel_gvt *gvt, in intel_gvt_for_each_tracked_mmio() argument
3015 int (*handler)(struct intel_gvt *gvt, u32 offset, void *data), in intel_gvt_for_each_tracked_mmio() argument
3018 struct gvt_mmio_block *block = gvt->mmio.mmio_block; in intel_gvt_for_each_tracked_mmio()
3022 hash_for_each(gvt->mmio.mmio_info_table, i, e, node) { in intel_gvt_for_each_tracked_mmio()
3023 ret = handler(gvt, e->offset, data); in intel_gvt_for_each_tracked_mmio()
3028 for (i = 0; i < gvt->mmio.num_mmio_block; i++, block++) { in intel_gvt_for_each_tracked_mmio()
3034 ret = handler(gvt, i915_mmio_reg_offset(block->offset) + j, data); in intel_gvt_for_each_tracked_mmio()
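
intel_gvt_for_each_tracked_mmio() (lines 3014-3034) is a visitor: the callback sees every individually tracked register from the hashtable, then every 32-bit slot of every registered block, and a nonzero return stops the walk. A kernel-style sketch; the kernel version additionally skips the PVINFO block, whose contents do not come from hardware:

	static int for_each_tracked(struct intel_gvt *gvt,
				    int (*handler)(struct intel_gvt *, u32, void *),
				    void *data)
	{
		struct gvt_mmio_block *block = gvt->mmio.mmio_block;
		struct intel_gvt_mmio_info *e;
		int i, j, ret;

		hash_for_each(gvt->mmio.mmio_info_table, i, e, node) {
			ret = handler(gvt, e->offset, data);
			if (ret)
				return ret;
		}

		for (i = 0; i < gvt->mmio.num_mmio_block; i++, block++) {
			for (j = 0; j < block->size; j += 4) {
				ret = handler(gvt,
					      i915_mmio_reg_offset(block->offset) + j,
					      data);
				if (ret)
					return ret;
			}
		}
		return 0;
	}
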
3111 bool intel_gvt_in_force_nonpriv_whitelist(struct intel_gvt *gvt, in intel_gvt_in_force_nonpriv_whitelist() argument
3131 struct drm_i915_private *i915 = vgpu->gvt->gt->i915; in intel_vgpu_mmio_reg_rw()
3132 struct intel_gvt *gvt = vgpu->gvt; in intel_vgpu_mmio_reg_rw() local
3144 mmio_block = find_mmio_block(gvt, offset); in intel_vgpu_mmio_reg_rw()
3155 mmio_info = intel_gvt_find_mmio_info(gvt, offset); in intel_vgpu_mmio_reg_rw()
3168 if (intel_gvt_mmio_has_mode_mask(gvt, mmio_info->offset)) { in intel_vgpu_mmio_reg_rw()
3186 if (intel_gvt_mmio_has_mode_mask(gvt, mmio_info->offset)) { in intel_vgpu_mmio_reg_rw()
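
The intel_gvt_mmio_has_mode_mask() checks at lines 3168 and 3186 select the masked ("mode") register path: for these i915 registers the upper 16 bits of the written data choose which of the lower 16 bits take effect, so unrelated bits in the virtual register survive the write. The update rule, sketched:

	static u32 apply_mode_mask(u32 old_vreg, u32 data)
	{
		u32 mask = data >> 16;

		/* Only bits whose mask bit is set are replaced. */
		return (old_vreg & ~mask) | (data & mask);
	}
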
3202 void intel_gvt_restore_fence(struct intel_gvt *gvt) in intel_gvt_restore_fence() argument
3207 idr_for_each_entry(&(gvt)->vgpu_idr, vgpu, id) { in intel_gvt_restore_fence()
3208 mmio_hw_access_pre(gvt->gt); in intel_gvt_restore_fence()
3211 mmio_hw_access_post(gvt->gt); in intel_gvt_restore_fence()
3215 static int mmio_pm_restore_handler(struct intel_gvt *gvt, u32 offset, void *data) in mmio_pm_restore_handler() argument
3218 struct drm_i915_private *dev_priv = gvt->gt->i915; in mmio_pm_restore_handler()
3220 if (gvt->mmio.mmio_attribute[offset >> 2] & F_PM_SAVE) in mmio_pm_restore_handler()
3226 void intel_gvt_restore_mmio(struct intel_gvt *gvt) in intel_gvt_restore_mmio() argument
3231 idr_for_each_entry(&(gvt)->vgpu_idr, vgpu, id) { in intel_gvt_restore_mmio()
3232 mmio_hw_access_pre(gvt->gt); in intel_gvt_restore_mmio()
3233 intel_gvt_for_each_tracked_mmio(gvt, mmio_pm_restore_handler, vgpu); in intel_gvt_restore_mmio()
3234 mmio_hw_access_post(gvt->gt); in intel_gvt_restore_mmio()
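
The power-management restore path at lines 3215-3234 combines several of the patterns above: for every vGPU registered in the IDR, each tracked register whose attribute byte carries F_PM_SAVE is written back to hardware from the vGPU's saved virtual value, inside the usual pre/post hardware-access bracket. A sketch of both halves:

	static int pm_restore_one(struct intel_gvt *gvt, u32 offset, void *data)
	{
		struct intel_vgpu *vgpu = data;

		if (gvt->mmio.mmio_attribute[offset >> 2] & F_PM_SAVE)
			intel_uncore_write(gvt->gt->uncore, _MMIO(offset),
					   vgpu_vreg(vgpu, offset));
		return 0;
	}

	static void restore_mmio(struct intel_gvt *gvt)
	{
		struct intel_vgpu *vgpu;
		int id;

		idr_for_each_entry(&gvt->vgpu_idr, vgpu, id) {
			mmio_hw_access_pre(gvt->gt);
			intel_gvt_for_each_tracked_mmio(gvt, pm_restore_one, vgpu);
			mmio_hw_access_post(gvt->gt);
		}
	}
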