Lines Matching refs: smmu_pmu

121 struct smmu_pmu {  struct
139 #define to_smmu_pmu(p) (container_of(p, struct smmu_pmu, pmu)) argument
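The two entries above are the only ones from the type's declaration. For orientation, here is a minimal sketch of struct smmu_pmu assembled from the fields referenced throughout this listing; field order and the array bounds SMMU_PMCG_MAX_COUNTERS / SMMU_PMCG_ARCH_MAX_EVENTS follow the upstream driver and may differ across kernel versions.

	struct smmu_pmu {
		struct hlist_node node;		/* cpuhp instance linkage */
		struct perf_event *events[SMMU_PMCG_MAX_COUNTERS];
		DECLARE_BITMAP(used_counters, SMMU_PMCG_MAX_COUNTERS);
		DECLARE_BITMAP(supported_events, SMMU_PMCG_ARCH_MAX_EVENTS);
		unsigned int irq;
		unsigned int on_cpu;		/* CPU that owns the perf context */
		struct pmu pmu;
		unsigned int num_counters;	/* CFGR.NCTR + 1 */
		struct device *dev;
		void __iomem *reg_base;		/* register page 0 */
		void __iomem *reloc_base;	/* page 1, or == reg_base */
		u64 counter_mask;		/* GENMASK_ULL(counter width - 1, 0) */
		u32 options;			/* IORT model quirks */
		u32 iidr;
		bool global_filter;		/* one SID filter shared by all counters */
	};

to_smmu_pmu() recovers this wrapper from the embedded struct pmu via container_of(), which is why nearly every callback below begins with that conversion.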
155 struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu); in smmu_pmu_enable() local
158 smmu_pmu->reg_base + SMMU_PMCG_IRQ_CTRL); in smmu_pmu_enable()
159 writel(SMMU_PMCG_CR_ENABLE, smmu_pmu->reg_base + SMMU_PMCG_CR); in smmu_pmu_enable()
164 struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu); in smmu_pmu_disable() local
166 writel(0, smmu_pmu->reg_base + SMMU_PMCG_CR); in smmu_pmu_disable()
167 writel(0, smmu_pmu->reg_base + SMMU_PMCG_IRQ_CTRL); in smmu_pmu_disable()
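Lines 155-167 form the global on/off pair the perf core calls around scheduling. A hedged reconstruction (SMMU_PMCG_IRQ_CTRL_IRQEN is the interrupt-enable bit in the upstream driver; treat the exact constant as an assumption):

	static void smmu_pmu_enable(struct pmu *pmu)
	{
		struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu);

		/* Unmask the overflow interrupt, then start counting. */
		writel(SMMU_PMCG_IRQ_CTRL_IRQEN,
		       smmu_pmu->reg_base + SMMU_PMCG_IRQ_CTRL);
		writel(SMMU_PMCG_CR_ENABLE, smmu_pmu->reg_base + SMMU_PMCG_CR);
	}

	static void smmu_pmu_disable(struct pmu *pmu)
	{
		struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu);

		/* Reverse order: stop counting before masking the interrupt. */
		writel(0, smmu_pmu->reg_base + SMMU_PMCG_CR);
		writel(0, smmu_pmu->reg_base + SMMU_PMCG_IRQ_CTRL);
	}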
170 static inline void smmu_pmu_counter_set_value(struct smmu_pmu *smmu_pmu, in smmu_pmu_counter_set_value() argument
173 if (smmu_pmu->counter_mask & BIT(32)) in smmu_pmu_counter_set_value()
174 writeq(value, smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 8)); in smmu_pmu_counter_set_value()
176 writel(value, smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 4)); in smmu_pmu_counter_set_value()
179 static inline u64 smmu_pmu_counter_get_value(struct smmu_pmu *smmu_pmu, u32 idx) in smmu_pmu_counter_get_value() argument
183 if (smmu_pmu->counter_mask & BIT(32)) in smmu_pmu_counter_get_value()
184 value = readq(smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 8)); in smmu_pmu_counter_get_value()
186 value = readl(smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 4)); in smmu_pmu_counter_get_value()
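The BIT(32) test on counter_mask distinguishes implementations with 64-bit counters from those with 32-bit ones, and the second argument to SMMU_PMCG_EVCNTR() is the matching register stride in bytes. Completing line 179's accessor as a sketch:

	static inline u64 smmu_pmu_counter_get_value(struct smmu_pmu *smmu_pmu, u32 idx)
	{
		u64 value;

		/*
		 * counter_mask covers bits [width-1:0]; if bit 32 is set the
		 * counters are 64-bit at an 8-byte stride, else 32-bit at a
		 * 4-byte stride. The counters live in the relocatable page
		 * (reloc_base), not page 0.
		 */
		if (smmu_pmu->counter_mask & BIT(32))
			value = readq(smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 8));
		else
			value = readl(smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 4));

		return value;
	}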
191 static inline void smmu_pmu_counter_enable(struct smmu_pmu *smmu_pmu, u32 idx) in smmu_pmu_counter_enable() argument
193 writeq(BIT(idx), smmu_pmu->reg_base + SMMU_PMCG_CNTENSET0); in smmu_pmu_counter_enable()
196 static inline void smmu_pmu_counter_disable(struct smmu_pmu *smmu_pmu, u32 idx) in smmu_pmu_counter_disable() argument
198 writeq(BIT(idx), smmu_pmu->reg_base + SMMU_PMCG_CNTENCLR0); in smmu_pmu_counter_disable()
201 static inline void smmu_pmu_interrupt_enable(struct smmu_pmu *smmu_pmu, u32 idx) in smmu_pmu_interrupt_enable() argument
203 writeq(BIT(idx), smmu_pmu->reg_base + SMMU_PMCG_INTENSET0); in smmu_pmu_interrupt_enable()
206 static inline void smmu_pmu_interrupt_disable(struct smmu_pmu *smmu_pmu, in smmu_pmu_interrupt_disable() argument
209 writeq(BIT(idx), smmu_pmu->reg_base + SMMU_PMCG_INTENCLR0); in smmu_pmu_interrupt_disable()
212 static inline void smmu_pmu_set_evtyper(struct smmu_pmu *smmu_pmu, u32 idx, in smmu_pmu_set_evtyper() argument
215 writel(val, smmu_pmu->reg_base + SMMU_PMCG_EVTYPER(idx)); in smmu_pmu_set_evtyper()
218 static inline void smmu_pmu_set_smr(struct smmu_pmu *smmu_pmu, u32 idx, u32 val) in smmu_pmu_set_smr() argument
220 writel(val, smmu_pmu->reg_base + SMMU_PMCG_SMR(idx)); in smmu_pmu_set_smr()
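Lines 191-220 are thin register accessors. The four enable/disable helpers use the Arm write-1-to-set / write-1-to-clear idiom, where BIT(idx) written to a *SET0 register affects exactly one counter or interrupt and the paired *CLR0 register undoes it, so no read-modify-write or locking is needed; the EVTYPER/SMR helpers instead program a whole per-counter register. A hedged sketch of how the scheduling path composes them (see smmu_pmu_set_event_filter at line 274 and smmu_pmu_event_add at line 457 below):

	/* Program what to count and which StreamID to match... */
	smmu_pmu_set_evtyper(smmu_pmu, idx, evtyper);
	smmu_pmu_set_smr(smmu_pmu, idx, sid);
	/* ...then enable the counter and its interrupt without
	 * disturbing any of the other counters. */
	smmu_pmu_interrupt_enable(smmu_pmu, idx);	/* BIT(idx) -> INTENSET0 */
	smmu_pmu_counter_enable(smmu_pmu, idx);		/* BIT(idx) -> CNTENSET0 */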
226 struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu); in smmu_pmu_event_update() local
232 now = smmu_pmu_counter_get_value(smmu_pmu, idx); in smmu_pmu_event_update()
237 delta &= smmu_pmu->counter_mask; in smmu_pmu_event_update()
242 static void smmu_pmu_set_period(struct smmu_pmu *smmu_pmu, in smmu_pmu_set_period() argument
248 if (smmu_pmu->options & SMMU_PMCG_EVCNTR_RDONLY) { in smmu_pmu_set_period()
256 new = smmu_pmu_counter_get_value(smmu_pmu, idx); in smmu_pmu_set_period()
264 new = smmu_pmu->counter_mask >> 1; in smmu_pmu_set_period()
265 smmu_pmu_counter_set_value(smmu_pmu, idx, new); in smmu_pmu_set_period()
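Lines 226-237 and 242-265 implement the standard perf counting pattern: event_update samples the counter, publishes the sample as the new prev_count through a cmpxchg loop so a racing update cannot double-count, and masks the difference so a wrapped narrow counter still yields a positive delta. set_period then restarts the counter at half range, so an overflow interrupt fires after 2^(width-1) events; on SMMU_PMCG_EVCNTR_RDONLY hardware the counter cannot be written, so the current value is read back as the starting point instead. A reconstruction of the update side:

	static void smmu_pmu_event_update(struct perf_event *event)
	{
		struct hw_perf_event *hwc = &event->hw;
		struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
		u64 delta, prev, now;
		u32 idx = hwc->idx;

		do {
			prev = local64_read(&hwc->prev_count);
			now = smmu_pmu_counter_get_value(smmu_pmu, idx);
		} while (local64_cmpxchg(&hwc->prev_count, prev, now) != prev);

		/* The mask makes the subtraction wrap-safe. */
		delta = now - prev;
		delta &= smmu_pmu->counter_mask;

		local64_add(delta, &event->count);
	}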
274 struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu); in smmu_pmu_set_event_filter() local
278 smmu_pmu_set_evtyper(smmu_pmu, idx, evtyper); in smmu_pmu_set_event_filter()
279 smmu_pmu_set_smr(smmu_pmu, idx, sid); in smmu_pmu_set_event_filter()
295 static int smmu_pmu_apply_event_filter(struct smmu_pmu *smmu_pmu, in smmu_pmu_apply_event_filter() argument
299 unsigned int cur_idx, num_ctrs = smmu_pmu->num_counters; in smmu_pmu_apply_event_filter()
307 cur_idx = find_first_bit(smmu_pmu->used_counters, num_ctrs); in smmu_pmu_apply_event_filter()
312 if (!smmu_pmu->global_filter || cur_idx == num_ctrs) { in smmu_pmu_apply_event_filter()
318 if (smmu_pmu_check_global_filter(smmu_pmu->events[cur_idx], event)) { in smmu_pmu_apply_event_filter()
319 smmu_pmu_set_evtyper(smmu_pmu, idx, get_event(event)); in smmu_pmu_apply_event_filter()
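Lines 295-319 decide where the StreamID filter can be programmed. With per-counter filtering each counter owns its EVTYPER/SMR pair; with a global filter (counter 0's settings apply to every counter) a new event is only schedulable if its filter matches whatever is already running. A hedged reconstruction; the get_filter_*() helpers extract perf_event_attr config fields in the upstream driver, and their names are assumptions here:

	static int smmu_pmu_apply_event_filter(struct smmu_pmu *smmu_pmu,
					       struct perf_event *event, int idx)
	{
		u32 span, sid;
		unsigned int cur_idx, num_ctrs = smmu_pmu->num_counters;
		bool filter_en = !!get_filter_enable(event);

		span = filter_en ? get_filter_span(event) :
				   SMMU_PMCG_DEFAULT_FILTER_SPAN;
		sid = filter_en ? get_filter_stream_id(event) :
				  SMMU_PMCG_DEFAULT_FILTER_SID;

		cur_idx = find_first_bit(smmu_pmu->used_counters, num_ctrs);
		/*
		 * Per-counter filtering, or an empty PMU (where programming
		 * counter 0 establishes the global filter anyway): just
		 * program slot idx directly.
		 */
		if (!smmu_pmu->global_filter || cur_idx == num_ctrs) {
			smmu_pmu_set_event_filter(event, idx, span, sid);
			return 0;
		}

		/* Global filter in force: the new event must match it. */
		if (smmu_pmu_check_global_filter(smmu_pmu->events[cur_idx], event)) {
			smmu_pmu_set_evtyper(smmu_pmu, idx, get_event(event));
			return 0;
		}

		return -EAGAIN;
	}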
326 static int smmu_pmu_get_event_idx(struct smmu_pmu *smmu_pmu, in smmu_pmu_get_event_idx() argument
330 unsigned int num_ctrs = smmu_pmu->num_counters; in smmu_pmu_get_event_idx()
332 idx = find_first_zero_bit(smmu_pmu->used_counters, num_ctrs); in smmu_pmu_get_event_idx()
337 err = smmu_pmu_apply_event_filter(smmu_pmu, event, idx); in smmu_pmu_get_event_idx()
341 set_bit(idx, smmu_pmu->used_counters); in smmu_pmu_get_event_idx()
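With the filter constraint handled, counter allocation itself (lines 326-341) is just a bitmap search. Reconstructed:

	static int smmu_pmu_get_event_idx(struct smmu_pmu *smmu_pmu,
					  struct perf_event *event)
	{
		int idx, err;
		unsigned int num_ctrs = smmu_pmu->num_counters;

		idx = find_first_zero_bit(smmu_pmu->used_counters, num_ctrs);
		if (idx == num_ctrs)
			return -EAGAIN;	/* all counters in use */

		err = smmu_pmu_apply_event_filter(smmu_pmu, event, idx);
		if (err)
			return err;

		set_bit(idx, smmu_pmu->used_counters);

		return idx;
	}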
367 struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu); in smmu_pmu_event_init() local
368 struct device *dev = smmu_pmu->dev; in smmu_pmu_event_init()
389 (!test_bit(event_id, smmu_pmu->supported_events))) { in smmu_pmu_event_init()
399 if (++group_num_events > smmu_pmu->num_counters) in smmu_pmu_event_init()
410 if (++group_num_events > smmu_pmu->num_counters) in smmu_pmu_event_init()
420 event->cpu = smmu_pmu->on_cpu; in smmu_pmu_event_init()
427 struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu); in smmu_pmu_event_start() local
433 smmu_pmu_set_period(smmu_pmu, hwc); in smmu_pmu_event_start()
435 smmu_pmu_counter_enable(smmu_pmu, idx); in smmu_pmu_event_start()
440 struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu); in smmu_pmu_event_stop() local
447 smmu_pmu_counter_disable(smmu_pmu, idx); in smmu_pmu_event_stop()
457 struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu); in smmu_pmu_event_add() local
459 idx = smmu_pmu_get_event_idx(smmu_pmu, event); in smmu_pmu_event_add()
465 smmu_pmu->events[idx] = event; in smmu_pmu_event_add()
468 smmu_pmu_interrupt_enable(smmu_pmu, idx); in smmu_pmu_event_add()
482 struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu); in smmu_pmu_event_del() local
486 smmu_pmu_interrupt_disable(smmu_pmu, idx); in smmu_pmu_event_del()
487 smmu_pmu->events[idx] = NULL; in smmu_pmu_event_del()
488 clear_bit(idx, smmu_pmu->used_counters); in smmu_pmu_event_del()
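Lines 427-488 are the usual perf scheduling callbacks: add claims a counter, records the event, and enables its overflow interrupt; start seeds the period and enables the counter; stop and del undo the two halves in reverse. A sketch of the add side, which ties the earlier helpers together (the PERF_HES_* and PERF_EF_START flags come from the perf core):

	static int smmu_pmu_event_add(struct perf_event *event, int flags)
	{
		struct hw_perf_event *hwc = &event->hw;
		struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
		int idx;

		idx = smmu_pmu_get_event_idx(smmu_pmu, event);
		if (idx < 0)
			return idx;

		hwc->idx = idx;
		hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
		smmu_pmu->events[idx] = event;
		local64_set(&hwc->prev_count, 0);

		smmu_pmu_interrupt_enable(smmu_pmu, idx);

		if (flags & PERF_EF_START)
			smmu_pmu_event_start(event, flags);

		/* Propagate the change to the userspace mmap page. */
		perf_event_update_userpage(event);

		return 0;
	}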
504 struct smmu_pmu *smmu_pmu = to_smmu_pmu(dev_get_drvdata(dev)); in smmu_pmu_cpumask_show() local
506 return cpumap_print_to_pagebuf(true, buf, cpumask_of(smmu_pmu->on_cpu)); in smmu_pmu_cpumask_show()
552 struct smmu_pmu *smmu_pmu = to_smmu_pmu(dev_get_drvdata(dev)); in smmu_pmu_event_is_visible() local
557 if (test_bit(pmu_attr->id, smmu_pmu->supported_events)) in smmu_pmu_event_is_visible()
573 struct smmu_pmu *smmu_pmu = to_smmu_pmu(dev_get_drvdata(dev)); in smmu_pmu_identifier_attr_show() local
575 return sysfs_emit(page, "0x%08x\n", smmu_pmu->iidr); in smmu_pmu_identifier_attr_show()
583 struct smmu_pmu *smmu_pmu = to_smmu_pmu(dev_get_drvdata(dev)); in smmu_pmu_identifier_attr_visible() local
585 if (!smmu_pmu->iidr) in smmu_pmu_identifier_attr_visible()
636 struct smmu_pmu *smmu_pmu; in smmu_pmu_offline_cpu() local
639 smmu_pmu = hlist_entry_safe(node, struct smmu_pmu, node); in smmu_pmu_offline_cpu()
640 if (cpu != smmu_pmu->on_cpu) in smmu_pmu_offline_cpu()
647 perf_pmu_migrate_context(&smmu_pmu->pmu, cpu, target); in smmu_pmu_offline_cpu()
648 smmu_pmu->on_cpu = target; in smmu_pmu_offline_cpu()
649 WARN_ON(irq_set_affinity(smmu_pmu->irq, cpumask_of(target))); in smmu_pmu_offline_cpu()
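Lines 636-649: the PMCG is an uncore (system-level) PMU, so all of its events run on one designated CPU; when that CPU goes offline, the perf context and the IRQ affinity migrate together to a surviving CPU. Reconstructed sketch:

	static int smmu_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
	{
		struct smmu_pmu *smmu_pmu;
		unsigned int target;

		smmu_pmu = hlist_entry_safe(node, struct smmu_pmu, node);
		if (cpu != smmu_pmu->on_cpu)
			return 0;

		target = cpumask_any_but(cpu_online_mask, cpu);
		if (target >= nr_cpu_ids)
			return 0;	/* last CPU going down */

		perf_pmu_migrate_context(&smmu_pmu->pmu, cpu, target);
		smmu_pmu->on_cpu = target;
		WARN_ON(irq_set_affinity(smmu_pmu->irq, cpumask_of(target)));

		return 0;
	}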
656 struct smmu_pmu *smmu_pmu = data; in smmu_pmu_handle_irq() local
661 ovsr = readq(smmu_pmu->reloc_base + SMMU_PMCG_OVSSET0); in smmu_pmu_handle_irq()
665 writeq(ovsr, smmu_pmu->reloc_base + SMMU_PMCG_OVSCLR0); in smmu_pmu_handle_irq()
668 for_each_set_bit(idx, ovs, smmu_pmu->num_counters) { in smmu_pmu_handle_irq()
669 struct perf_event *event = smmu_pmu->events[idx]; in smmu_pmu_handle_irq()
678 smmu_pmu_set_period(smmu_pmu, hwc); in smmu_pmu_handle_irq()
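Lines 656-678: the overflow handler reads the sticky overflow-set register, acknowledges everything it saw with one write to the clear register, then updates and re-arms each overflowed event. Reconstructed sketch:

	static irqreturn_t smmu_pmu_handle_irq(int irq_num, void *data)
	{
		struct smmu_pmu *smmu_pmu = data;
		DECLARE_BITMAP(ovs, BITS_PER_TYPE(u64));
		unsigned int idx;
		u64 ovsr;

		ovsr = readq(smmu_pmu->reloc_base + SMMU_PMCG_OVSSET0);
		if (!ovsr)
			return IRQ_NONE;	/* not ours (shared IRQ) */

		writeq(ovsr, smmu_pmu->reloc_base + SMMU_PMCG_OVSCLR0);

		bitmap_from_u64(ovs, ovsr);
		for_each_set_bit(idx, ovs, smmu_pmu->num_counters) {
			struct perf_event *event = smmu_pmu->events[idx];
			struct hw_perf_event *hwc;

			if (WARN_ON_ONCE(!event))
				continue;

			/* Fold the wrapped count in, then re-arm at half range. */
			smmu_pmu_event_update(event);
			hwc = &event->hw;
			smmu_pmu_set_period(smmu_pmu, hwc);
		}

		return IRQ_HANDLED;
	}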
695 struct smmu_pmu *pmu = dev_get_drvdata(dev); in smmu_pmu_write_msi_msg()
706 static void smmu_pmu_setup_msi(struct smmu_pmu *pmu) in smmu_pmu_setup_msi()
730 static int smmu_pmu_setup_irq(struct smmu_pmu *pmu) in smmu_pmu_setup_irq()
744 static void smmu_pmu_reset(struct smmu_pmu *smmu_pmu) in smmu_pmu_reset() argument
746 u64 counter_present_mask = GENMASK_ULL(smmu_pmu->num_counters - 1, 0); in smmu_pmu_reset()
748 smmu_pmu_disable(&smmu_pmu->pmu); in smmu_pmu_reset()
752 smmu_pmu->reg_base + SMMU_PMCG_CNTENCLR0); in smmu_pmu_reset()
754 smmu_pmu->reg_base + SMMU_PMCG_INTENCLR0); in smmu_pmu_reset()
756 smmu_pmu->reloc_base + SMMU_PMCG_OVSCLR0); in smmu_pmu_reset()
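Lines 744-756 quiesce the hardware before the driver takes over: disable the PMU globally, then clear every counter enable, interrupt enable, and stale overflow flag for all implemented counters. Reconstructed:

	static void smmu_pmu_reset(struct smmu_pmu *smmu_pmu)
	{
		u64 counter_present_mask = GENMASK_ULL(smmu_pmu->num_counters - 1, 0);

		smmu_pmu_disable(&smmu_pmu->pmu);

		/* Disable counters and interrupts, clear stale overflows. */
		writeq(counter_present_mask,
		       smmu_pmu->reg_base + SMMU_PMCG_CNTENCLR0);
		writeq(counter_present_mask,
		       smmu_pmu->reg_base + SMMU_PMCG_INTENCLR0);
		writeq(counter_present_mask,
		       smmu_pmu->reloc_base + SMMU_PMCG_OVSCLR0);
	}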
759 static void smmu_pmu_get_acpi_options(struct smmu_pmu *smmu_pmu) in smmu_pmu_get_acpi_options() argument
763 model = *(u32 *)dev_get_platdata(smmu_pmu->dev); in smmu_pmu_get_acpi_options()
768 smmu_pmu->options |= SMMU_PMCG_EVCNTR_RDONLY; in smmu_pmu_get_acpi_options()
772 dev_notice(smmu_pmu->dev, "option mask 0x%x\n", smmu_pmu->options); in smmu_pmu_get_acpi_options()
775 static bool smmu_pmu_coresight_id_regs(struct smmu_pmu *smmu_pmu) in smmu_pmu_coresight_id_regs() argument
777 return of_device_is_compatible(smmu_pmu->dev->of_node, in smmu_pmu_coresight_id_regs()
781 static void smmu_pmu_get_iidr(struct smmu_pmu *smmu_pmu) in smmu_pmu_get_iidr() argument
783 u32 iidr = readl_relaxed(smmu_pmu->reg_base + SMMU_PMCG_IIDR); in smmu_pmu_get_iidr()
785 if (!iidr && smmu_pmu_coresight_id_regs(smmu_pmu)) { in smmu_pmu_get_iidr()
786 u32 pidr0 = readl(smmu_pmu->reg_base + SMMU_PMCG_PIDR0); in smmu_pmu_get_iidr()
787 u32 pidr1 = readl(smmu_pmu->reg_base + SMMU_PMCG_PIDR1); in smmu_pmu_get_iidr()
788 u32 pidr2 = readl(smmu_pmu->reg_base + SMMU_PMCG_PIDR2); in smmu_pmu_get_iidr()
789 u32 pidr3 = readl(smmu_pmu->reg_base + SMMU_PMCG_PIDR3); in smmu_pmu_get_iidr()
790 u32 pidr4 = readl(smmu_pmu->reg_base + SMMU_PMCG_PIDR4); in smmu_pmu_get_iidr()
807 smmu_pmu->iidr = iidr; in smmu_pmu_get_iidr()
812 struct smmu_pmu *smmu_pmu; in smmu_pmu_probe() local
820 smmu_pmu = devm_kzalloc(dev, sizeof(*smmu_pmu), GFP_KERNEL); in smmu_pmu_probe()
821 if (!smmu_pmu) in smmu_pmu_probe()
824 smmu_pmu->dev = dev; in smmu_pmu_probe()
825 platform_set_drvdata(pdev, smmu_pmu); in smmu_pmu_probe()
827 smmu_pmu->pmu = (struct pmu) { in smmu_pmu_probe()
842 smmu_pmu->reg_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res_0); in smmu_pmu_probe()
843 if (IS_ERR(smmu_pmu->reg_base)) in smmu_pmu_probe()
844 return PTR_ERR(smmu_pmu->reg_base); in smmu_pmu_probe()
846 cfgr = readl_relaxed(smmu_pmu->reg_base + SMMU_PMCG_CFGR); in smmu_pmu_probe()
850 smmu_pmu->reloc_base = devm_platform_ioremap_resource(pdev, 1); in smmu_pmu_probe()
851 if (IS_ERR(smmu_pmu->reloc_base)) in smmu_pmu_probe()
852 return PTR_ERR(smmu_pmu->reloc_base); in smmu_pmu_probe()
854 smmu_pmu->reloc_base = smmu_pmu->reg_base; in smmu_pmu_probe()
859 smmu_pmu->irq = irq; in smmu_pmu_probe()
861 ceid_64[0] = readq_relaxed(smmu_pmu->reg_base + SMMU_PMCG_CEID0); in smmu_pmu_probe()
862 ceid_64[1] = readq_relaxed(smmu_pmu->reg_base + SMMU_PMCG_CEID1); in smmu_pmu_probe()
863 bitmap_from_arr32(smmu_pmu->supported_events, (u32 *)ceid_64, in smmu_pmu_probe()
866 smmu_pmu->num_counters = FIELD_GET(SMMU_PMCG_CFGR_NCTR, cfgr) + 1; in smmu_pmu_probe()
868 smmu_pmu->global_filter = !!(cfgr & SMMU_PMCG_CFGR_SID_FILTER_TYPE); in smmu_pmu_probe()
871 smmu_pmu->counter_mask = GENMASK_ULL(reg_size, 0); in smmu_pmu_probe()
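Most of the instance description is decoded from SMMU_PMCG_CFGR, read at line 846: NCTR gives the counter count (minus one), SID_FILTER_TYPE selects global versus per-counter filtering, and SIZE gives the counter width that feeds counter_mask at line 871. A sketch of that decode, with field macro names taken from the upstream driver:

	cfgr = readl_relaxed(smmu_pmu->reg_base + SMMU_PMCG_CFGR);

	/* NCTR encodes the number of counters minus one. */
	smmu_pmu->num_counters = FIELD_GET(SMMU_PMCG_CFGR_NCTR, cfgr) + 1;

	/* One global SID filter, or one per counter? */
	smmu_pmu->global_filter = !!(cfgr & SMMU_PMCG_CFGR_SID_FILTER_TYPE);

	/* SIZE encodes the counter width minus one, e.g. 31 for 32 bits. */
	reg_size = FIELD_GET(SMMU_PMCG_CFGR_SIZE, cfgr);
	smmu_pmu->counter_mask = GENMASK_ULL(reg_size, 0);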
873 smmu_pmu_reset(smmu_pmu); in smmu_pmu_probe()
875 err = smmu_pmu_setup_irq(smmu_pmu); in smmu_pmu_probe()
881 smmu_pmu_get_iidr(smmu_pmu); in smmu_pmu_probe()
891 smmu_pmu_get_acpi_options(smmu_pmu); in smmu_pmu_probe()
894 smmu_pmu->on_cpu = raw_smp_processor_id(); in smmu_pmu_probe()
895 WARN_ON(irq_set_affinity(smmu_pmu->irq, cpumask_of(smmu_pmu->on_cpu))); in smmu_pmu_probe()
898 &smmu_pmu->node); in smmu_pmu_probe()
905 err = perf_pmu_register(&smmu_pmu->pmu, name, -1); in smmu_pmu_probe()
913 &res_0->start, smmu_pmu->num_counters, in smmu_pmu_probe()
914 smmu_pmu->global_filter ? "Global(Counter0)" : in smmu_pmu_probe()
920 cpuhp_state_remove_instance_nocalls(cpuhp_state_num, &smmu_pmu->node); in smmu_pmu_probe()
926 struct smmu_pmu *smmu_pmu = platform_get_drvdata(pdev); in smmu_pmu_remove() local
928 perf_pmu_unregister(&smmu_pmu->pmu); in smmu_pmu_remove()
929 cpuhp_state_remove_instance_nocalls(cpuhp_state_num, &smmu_pmu->node); in smmu_pmu_remove()
936 struct smmu_pmu *smmu_pmu = platform_get_drvdata(pdev); in smmu_pmu_shutdown() local
938 smmu_pmu_disable(&smmu_pmu->pmu); in smmu_pmu_shutdown()