/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015 Linaro Ltd.
 * Author: Shannon Zhao <shannon.zhao@linaro.org>
 */

#ifndef __ASM_ARM_KVM_PMU_H
#define __ASM_ARM_KVM_PMU_H

#include <linux/perf_event.h>
#include <asm/perf_event.h>

#define ARMV8_PMU_CYCLE_IDX		(ARMV8_PMU_MAX_COUNTERS - 1)
#define ARMV8_PMU_MAX_COUNTER_PAIRS	((ARMV8_PMU_MAX_COUNTERS + 1) >> 1)
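/*
 * With ARMV8_PMU_MAX_COUNTERS == 32 (asm/perf_event.h), the cycle counter
 * sits at the highest index, 31, and the counters form 16 even/odd pairs;
 * the ARMv8 CHAIN event can combine a pair into one 64-bit counter, which
 * is what the "chained" bitmap below tracks.
 */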

#ifdef CONFIG_HW_PERF_EVENTS

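/*
 * One guest counter. While the counter is live, perf_event points at the
 * host perf event backing it; a NULL perf_event means the counter is not
 * currently counting.
 */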
struct kvm_pmc {
	u8 idx;	/* index into the pmu->pmc array */
	struct perf_event *perf_event;
};

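/*
 * Counter-enable bitmasks consumed by the hyp switch code: events_host
 * holds the events to keep running while in the host, events_guest those
 * to run while in the guest (backing perf's exclude_guest/exclude_host).
 */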
struct kvm_pmu_events {
	u32 events_host;
	u32 events_guest;
};

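/*
 * Per-vcpu PMU emulation state. overflow_work defers overflow handling to
 * IRQ-work context so the perf overflow callback stays NMI-safe.
 */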
struct kvm_pmu {
	struct irq_work overflow_work;
	struct kvm_pmu_events events;
	struct kvm_pmc pmc[ARMV8_PMU_MAX_COUNTERS];
	DECLARE_BITMAP(chained, ARMV8_PMU_MAX_COUNTER_PAIRS);
	int irq_num;
	bool created;
	bool irq_level;
};

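/*
 * Node in KVM's list of host arm_pmu instances, so that a PMU can be
 * selected per-vcpu on systems with more than one PMU type (e.g.
 * big.LITTLE).
 */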
struct arm_pmu_entry {
	struct list_head entry;
	struct arm_pmu *arm_pmu;
};

DECLARE_STATIC_KEY_FALSE(kvm_arm_pmu_available);

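/*
 * The static key is flipped on once the host has probed a hardware PMU
 * driver, so this check compiles down to a runtime-patched branch rather
 * than a load on every call.
 */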
static __always_inline bool kvm_arm_support_pmu_v3(void)
{
	return static_branch_likely(&kvm_arm_pmu_available);
}

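/*
 * A zeroed kvm_pmu has irq_num == 0, which is an SGI number; any value
 * below VGIC_NR_SGIS therefore means userspace has not yet configured the
 * PMU overflow interrupt (a PPI or SPI).
 */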
#define kvm_arm_pmu_irq_initialized(v)	((v)->arch.pmu.irq_num >= VGIC_NR_SGIS)
u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx);
void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val);
u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu);
u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1);
void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu);
void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu);
void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu);
void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu);
bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu);
void kvm_pmu_update_run(struct kvm_vcpu *vcpu);
void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
				    u64 select_idx);
int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu);

struct kvm_pmu_events *kvm_get_pmu_events(void);
void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu);
void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu);

#define kvm_vcpu_has_pmu(vcpu)					\
	(test_bit(KVM_ARM_VCPU_PMU_V3, (vcpu)->arch.features))

/*
 * Updates the vcpu's view of the pmu events for this cpu.
 * Must be called before every vcpu run after disabling interrupts, to ensure
 * that an interrupt cannot fire and update the structure.
 */
#define kvm_pmu_update_vcpu_events(vcpu)				\
	do {								\
		if (!has_vhe() && kvm_vcpu_has_pmu(vcpu))		\
			vcpu->arch.pmu.events = *kvm_get_pmu_events();	\
	} while (0)
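
/*
 * Illustrative call-site sketch (not taken from this header; the entry
 * helper name is hypothetical). The snapshot must happen after interrupts
 * are disabled so a PMU interrupt cannot change the events underneath us:
 *
 *	local_irq_disable();
 *	kvm_pmu_update_vcpu_events(vcpu);
 *	ret = enter_guest(vcpu);	// hypothetical world-switch helper
 *	local_irq_enable();
 */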

#else
struct kvm_pmu {
};

static inline bool kvm_arm_support_pmu_v3(void)
{
	return false;
}

#define kvm_arm_pmu_irq_initialized(v)	(false)
static inline u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu,
					    u64 select_idx)
{
	return 0;
}
static inline void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu,
					     u64 select_idx, u64 val) {}
static inline u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
{
	return 0;
}
static inline void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu) {}
static inline bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu)
{
	return false;
}
static inline void kvm_pmu_update_run(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu,
						  u64 data, u64 select_idx) {}
static inline int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu,
					  struct kvm_device_attr *attr)
{
	return -ENXIO;
}
static inline int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
					  struct kvm_device_attr *attr)
{
	return -ENXIO;
}
static inline int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu,
					  struct kvm_device_attr *attr)
{
	return -ENXIO;
}
static inline int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
{
	return 0;
}
static inline u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
{
	return 0;
}

#define kvm_vcpu_has_pmu(vcpu)		({ false; })
static inline void kvm_pmu_update_vcpu_events(struct kvm_vcpu *vcpu) {}
static inline void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu) {}
static inline void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu) {}

#endif

#endif