Lines matching refs: cluster
243 static void cluster_pmu_set_resr(struct cluster_pmu *cluster, in cluster_pmu_set_resr() argument
254 spin_lock_irqsave(&cluster->pmu_lock, flags); in cluster_pmu_set_resr()
262 spin_unlock_irqrestore(&cluster->pmu_lock, flags); in cluster_pmu_set_resr()
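
The three hits above are from cluster_pmu_set_resr(), which does a locked read-modify-write of the cluster's shared event-selection register. A minimal sketch of the pattern, assuming hypothetical get_l2_reg()/set_l2_reg() accessors, an 8-bit field per event group, and an enable bit (none of which appear in this listing):

    #include <linux/bits.h>
    #include <linux/spinlock.h>
    #include <linux/types.h>

    #define L2PMRESR                0x330   /* assumed register id */
    #define L2PMRESR_GROUP_BITS     8       /* assumed width of one group field */
    #define L2PMRESR_GROUP_MASK     GENMASK(7, 0)
    #define L2PMRESR_EN             BIT_ULL(63)

    /* struct cluster_pmu is the driver's per-cluster state; the fields used
     * here (pmu_lock) are visible elsewhere in this listing. */
    static void cluster_pmu_set_resr(struct cluster_pmu *cluster,
                                     u32 event_group, u32 event_cc)
    {
            u32 shift = L2PMRESR_GROUP_BITS * event_group;
            unsigned long flags;
            u64 val;

            /*
             * The register carries the event code for every group of the
             * cluster, so the update is serialised on cluster->pmu_lock
             * (initialised in l2_cache_pmu_probe_cluster(), see below).
             */
            spin_lock_irqsave(&cluster->pmu_lock, flags);
            val = get_l2_reg(L2PMRESR);                     /* assumed accessor */
            val &= ~((u64)L2PMRESR_GROUP_MASK << shift);
            val |= ((u64)(event_cc & L2PMRESR_GROUP_MASK) << shift) | L2PMRESR_EN;
            set_l2_reg(L2PMRESR, val);                      /* assumed accessor */
            spin_unlock_irqrestore(&cluster->pmu_lock, flags);
    }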
319 static void l2_cache_cluster_set_period(struct cluster_pmu *cluster, in l2_cache_cluster_set_period() argument
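
l2_cache_cluster_set_period() presumably seeds both the software prev_count and the hardware counter with the same start value, so the next overflow interrupt arrives before the counter can wrap past it. A sketch under that assumption, with the reload constant and the hardware write helper both hypothetical:

    #include <linux/bits.h>
    #include <linux/perf_event.h>

    #define L2_COUNTER_RELOAD       BIT_ULL(31)     /* hypothetical: half the counter range */

    static void l2_cache_cluster_set_period(struct cluster_pmu *cluster,
                                            struct hw_perf_event *hwc)
    {
            u64 new = L2_COUNTER_RELOAD;

            /* Keep the software copy and the hardware counter in sync. */
            local64_set(&hwc->prev_count, new);
            cluster_pmu_counter_set_value(hwc->idx, new);   /* hypothetical HW write */
    }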
339 static int l2_cache_get_event_idx(struct cluster_pmu *cluster, in l2_cache_get_event_idx() argument
344 int num_ctrs = cluster->l2cache_pmu->num_counters - 1; in l2_cache_get_event_idx()
348 if (test_and_set_bit(l2_cycle_ctr_idx, cluster->used_counters)) in l2_cache_get_event_idx()
354 idx = find_first_zero_bit(cluster->used_counters, num_ctrs); in l2_cache_get_event_idx()
365 if (test_bit(group, cluster->used_groups)) in l2_cache_get_event_idx()
368 set_bit(idx, cluster->used_counters); in l2_cache_get_event_idx()
369 set_bit(group, cluster->used_groups); in l2_cache_get_event_idx()
374 static void l2_cache_clear_event_idx(struct cluster_pmu *cluster, in l2_cache_clear_event_idx() argument
380 clear_bit(idx, cluster->used_counters); in l2_cache_clear_event_idx()
382 clear_bit(L2_EVT_GROUP(hwc->config_base), cluster->used_groups); in l2_cache_clear_event_idx()
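
l2_cache_get_event_idx() and l2_cache_clear_event_idx() manage two bitmaps per cluster: used_counters for counter slots and used_groups for column exclusion, so no two live events select the same event group. A sketch built from the fragments above; L2CYCLE_CTR_RAW_CODE and the exact bitmap sizes are assumptions:

    #include <linux/bitops.h>
    #include <linux/errno.h>
    #include <linux/perf_event.h>

    static int l2_cache_get_event_idx(struct cluster_pmu *cluster,
                                      struct perf_event *event)
    {
            struct hw_perf_event *hwc = &event->hw;
            int num_ctrs = cluster->l2cache_pmu->num_counters - 1;
            unsigned int group;
            int idx;

            if (hwc->config_base == L2CYCLE_CTR_RAW_CODE) {
                    /* The dedicated cycle counter can be claimed only once. */
                    if (test_and_set_bit(l2_cycle_ctr_idx, cluster->used_counters))
                            return -EAGAIN;
                    return l2_cycle_ctr_idx;
            }

            idx = find_first_zero_bit(cluster->used_counters, num_ctrs);
            if (idx == num_ctrs)
                    return -EAGAIN;         /* all general counters in use */

            /* Column exclusion: only one event per group may be live. */
            group = L2_EVT_GROUP(hwc->config_base);
            if (test_bit(group, cluster->used_groups))
                    return -EAGAIN;

            set_bit(idx, cluster->used_counters);
            set_bit(group, cluster->used_groups);
            return idx;
    }

    static void l2_cache_clear_event_idx(struct cluster_pmu *cluster,
                                         struct perf_event *event)
    {
            struct hw_perf_event *hwc = &event->hw;

            clear_bit(hwc->idx, cluster->used_counters);
            if (hwc->config_base != L2CYCLE_CTR_RAW_CODE)
                    clear_bit(L2_EVT_GROUP(hwc->config_base), cluster->used_groups);
    }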
387 struct cluster_pmu *cluster = data; in l2_cache_handle_irq() local
388 int num_counters = cluster->l2cache_pmu->num_counters; in l2_cache_handle_irq()
396 for_each_set_bit(idx, cluster->used_counters, num_counters) { in l2_cache_handle_irq()
397 struct perf_event *event = cluster->events[idx]; in l2_cache_handle_irq()
409 l2_cache_cluster_set_period(cluster, hwc); in l2_cache_handle_irq()
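
l2_cache_handle_irq() receives the cluster as its dev_id cookie, walks only the counters set in used_counters, folds each overflowed counter into its perf_event, and re-arms it via l2_cache_cluster_set_period(). Roughly as follows, where the overflow-status read and l2_cache_event_update() are assumed helpers and overflow bit i is taken to correspond to counter i:

    #include <linux/bitops.h>
    #include <linux/bits.h>
    #include <linux/interrupt.h>
    #include <linux/perf_event.h>

    static irqreturn_t l2_cache_handle_irq(int irq_num, void *data)
    {
            struct cluster_pmu *cluster = data;
            int num_counters = cluster->l2cache_pmu->num_counters;
            u32 ovsr = cluster_pmu_getreset_ovsr();  /* assumed: read and clear overflow bits */
            int idx;

            if (!ovsr)
                    return IRQ_NONE;                 /* interrupt was not ours */

            for_each_set_bit(idx, cluster->used_counters, num_counters) {
                    struct perf_event *event = cluster->events[idx];

                    if (!(ovsr & BIT(idx)))
                            continue;

                    l2_cache_event_update(event);    /* assumed: accumulate the delta */
                    l2_cache_cluster_set_period(cluster, &event->hw);
            }

            return IRQ_HANDLED;
    }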
441 struct cluster_pmu *cluster; in l2_cache_event_init() local
488 cluster = get_cluster_pmu(l2cache_pmu, event->cpu); in l2_cache_event_init()
489 if (!cluster) { in l2_cache_event_init()
498 (cluster->on_cpu != event->group_leader->cpu)) { in l2_cache_event_init()
536 event->cpu = cluster->on_cpu; in l2_cache_event_init()
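
In l2_cache_event_init() the cluster lookup feeds two checks: the requested CPU must already be associated with a cluster, and all events of a group must resolve to that cluster's reader CPU; the event is then redirected to cluster->on_cpu so every event of the cluster runs in one CPU context. A trimmed sketch, with the generic attribute checks of the real function elided:

    #include <linux/errno.h>
    #include <linux/perf_event.h>

    static int l2_cache_event_init(struct perf_event *event)
    {
            struct l2cache_pmu *l2cache_pmu = to_l2cache_pmu(event->pmu);
            struct cluster_pmu *cluster;

            /* ... sampling/grouping/config validation elided ... */

            cluster = get_cluster_pmu(l2cache_pmu, event->cpu);
            if (!cluster)
                    return -EINVAL;         /* CPU not mapped to any L2 cluster */

            /* Grouped events must all be counted by this cluster's reader CPU. */
            if (event->group_leader != event &&
                cluster->on_cpu != event->group_leader->cpu)
                    return -EINVAL;

            /* Route the event to the cluster's designated CPU so all of its
             * events share a single CPU context. */
            event->cpu = cluster->on_cpu;
            return 0;
    }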
543 struct cluster_pmu *cluster; in l2_cache_event_start() local
551 cluster = get_cluster_pmu(to_l2cache_pmu(event->pmu), event->cpu); in l2_cache_event_start()
553 l2_cache_cluster_set_period(cluster, hwc); in l2_cache_event_start()
564 cluster_pmu_set_resr(cluster, event_group, event_cc); in l2_cache_event_start()
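
l2_cache_event_start() re-seeds the counter through l2_cache_cluster_set_period() and, for non-cycle events, splits config_base into a group and an event code before programming them with cluster_pmu_set_resr(). Sketch; L2_EVT_CODE() and the per-counter enable helpers are placeholders for driver internals not shown in this listing:

    #include <linux/perf_event.h>

    static void l2_cache_event_start(struct perf_event *event, int flags)
    {
            struct hw_perf_event *hwc = &event->hw;
            struct cluster_pmu *cluster;

            hwc->state = 0;
            cluster = get_cluster_pmu(to_l2cache_pmu(event->pmu), event->cpu);

            l2_cache_cluster_set_period(cluster, hwc);      /* program the start value */

            if (hwc->config_base != L2CYCLE_CTR_RAW_CODE) {
                    u32 event_group = L2_EVT_GROUP(hwc->config_base);
                    u32 event_cc = L2_EVT_CODE(hwc->config_base);   /* placeholder */

                    cluster_pmu_set_resr(cluster, event_group, event_cc);
            }

            cluster_pmu_counter_enable_interrupt(hwc->idx);  /* placeholder */
            cluster_pmu_counter_enable(hwc->idx);            /* placeholder */
    }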
593 struct cluster_pmu *cluster; in l2_cache_event_add() local
595 cluster = get_cluster_pmu(to_l2cache_pmu(event->pmu), event->cpu); in l2_cache_event_add()
597 idx = l2_cache_get_event_idx(cluster, event); in l2_cache_event_add()
603 cluster->events[idx] = event; in l2_cache_event_add()
618 struct cluster_pmu *cluster; in l2_cache_event_del() local
621 cluster = get_cluster_pmu(to_l2cache_pmu(event->pmu), event->cpu); in l2_cache_event_del()
624 cluster->events[idx] = NULL; in l2_cache_event_del()
625 l2_cache_clear_event_idx(cluster, event); in l2_cache_event_del()
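
l2_cache_event_add() claims a counter slot via l2_cache_get_event_idx() and records the event in cluster->events[]; l2_cache_event_del() stops it and undoes both steps so the interrupt handler never dereferences a stale slot. Roughly:

    #include <linux/perf_event.h>

    static int l2_cache_event_add(struct perf_event *event, int flags)
    {
            struct cluster_pmu *cluster =
                    get_cluster_pmu(to_l2cache_pmu(event->pmu), event->cpu);
            struct hw_perf_event *hwc = &event->hw;
            int idx = l2_cache_get_event_idx(cluster, event);

            if (idx < 0)
                    return idx;

            hwc->idx = idx;
            hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
            cluster->events[idx] = event;

            if (flags & PERF_EF_START)
                    l2_cache_event_start(event, flags);

            perf_event_update_userpage(event);
            return 0;
    }

    static void l2_cache_event_del(struct perf_event *event, int flags)
    {
            struct cluster_pmu *cluster =
                    get_cluster_pmu(to_l2cache_pmu(event->pmu), event->cpu);
            struct hw_perf_event *hwc = &event->hw;

            l2_cache_event_stop(event, flags | PERF_EF_UPDATE);  /* assumed stop helper */
            cluster->events[hwc->idx] = NULL;
            l2_cache_clear_event_idx(cluster, event);

            perf_event_update_userpage(event);
    }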
739 struct cluster_pmu *cluster; in l2_cache_associate_cpu_with_cluster() local
752 list_for_each_entry(cluster, &l2cache_pmu->clusters, next) { in l2_cache_associate_cpu_with_cluster()
753 if (cluster->cluster_id != cpu_cluster_id) in l2_cache_associate_cpu_with_cluster()
758 cluster->cluster_id); in l2_cache_associate_cpu_with_cluster()
759 cpumask_set_cpu(cpu, &cluster->cluster_cpus); in l2_cache_associate_cpu_with_cluster()
760 *per_cpu_ptr(l2cache_pmu->pmu_cluster, cpu) = cluster; in l2_cache_associate_cpu_with_cluster()
761 return cluster; in l2_cache_associate_cpu_with_cluster()
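
l2_cache_associate_cpu_with_cluster() ties a CPU to its cluster_pmu by matching the firmware-provided cluster_id against the CPU's MPIDR affinity field, then caches the result in the pmu_cluster per-CPU pointer so later get_cluster_pmu() lookups are cheap. Sketch; reading Aff2 on SMT parts and Aff1 otherwise is an assumption:

    #include <linux/cpumask.h>
    #include <linux/list.h>
    #include <linux/percpu.h>
    #include <asm/cputype.h>

    static struct cluster_pmu *
    l2_cache_associate_cpu_with_cluster(struct l2cache_pmu *l2cache_pmu, int cpu)
    {
            u64 mpidr = read_cpuid_mpidr();   /* runs on the CPU being onlined */
            int cpu_cluster_id;
            struct cluster_pmu *cluster;

            if (mpidr & MPIDR_MT_BITMASK)
                    cpu_cluster_id = MPIDR_AFFINITY_LEVEL(mpidr, 2);
            else
                    cpu_cluster_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);

            list_for_each_entry(cluster, &l2cache_pmu->clusters, next) {
                    if (cluster->cluster_id != cpu_cluster_id)
                            continue;

                    cpumask_set_cpu(cpu, &cluster->cluster_cpus);
                    *per_cpu_ptr(l2cache_pmu->pmu_cluster, cpu) = cluster;
                    return cluster;
            }

            return NULL;    /* firmware did not describe a matching cluster */
    }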
769 struct cluster_pmu *cluster; in l2cache_pmu_online_cpu() local
773 cluster = get_cluster_pmu(l2cache_pmu, cpu); in l2cache_pmu_online_cpu()
774 if (!cluster) { in l2cache_pmu_online_cpu()
776 cluster = l2_cache_associate_cpu_with_cluster(l2cache_pmu, cpu); in l2cache_pmu_online_cpu()
777 if (!cluster) { in l2cache_pmu_online_cpu()
785 if (cluster->on_cpu != -1) in l2cache_pmu_online_cpu()
792 cluster->on_cpu = cpu; in l2cache_pmu_online_cpu()
796 WARN_ON(irq_set_affinity(cluster->irq, cpumask_of(cpu))); in l2cache_pmu_online_cpu()
797 enable_irq(cluster->irq); in l2cache_pmu_online_cpu()
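
The hotplug online callback lazily associates a newly onlined CPU with its cluster and, if no CPU currently owns that cluster, makes this one the reader: it records on_cpu, points the overflow interrupt at the CPU and unmasks it. Sketch, assuming l2cache_pmu embeds an hlist_node named node for the cpuhp state machine and a cpumask of reader CPUs:

    #include <linux/cpumask.h>
    #include <linux/interrupt.h>
    #include <linux/list.h>

    static int l2cache_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
    {
            struct l2cache_pmu *l2cache_pmu =
                    hlist_entry_safe(node, struct l2cache_pmu, node);
            struct cluster_pmu *cluster = get_cluster_pmu(l2cache_pmu, cpu);

            if (!cluster) {
                    /* First time this CPU comes up: work out which cluster it is in. */
                    cluster = l2_cache_associate_cpu_with_cluster(l2cache_pmu, cpu);
                    if (!cluster)
                            return 0;       /* cluster not described by firmware */
            }

            /* Another CPU already reads this cluster's counters; nothing to do. */
            if (cluster->on_cpu != -1)
                    return 0;

            /* Adopt the cluster: this CPU now fields its overflow interrupt. */
            cluster->on_cpu = cpu;
            cpumask_set_cpu(cpu, &l2cache_pmu->cpumask);    /* assumed reader mask */

            WARN_ON(irq_set_affinity(cluster->irq, cpumask_of(cpu)));
            enable_irq(cluster->irq);
            return 0;
    }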
804 struct cluster_pmu *cluster; in l2cache_pmu_offline_cpu() local
810 cluster = get_cluster_pmu(l2cache_pmu, cpu); in l2cache_pmu_offline_cpu()
811 if (!cluster) in l2cache_pmu_offline_cpu()
815 if (cluster->on_cpu != cpu) in l2cache_pmu_offline_cpu()
820 cluster->on_cpu = -1; in l2cache_pmu_offline_cpu()
823 cpumask_and(&cluster_online_cpus, &cluster->cluster_cpus, in l2cache_pmu_offline_cpu()
827 disable_irq(cluster->irq); in l2cache_pmu_offline_cpu()
832 cluster->on_cpu = target; in l2cache_pmu_offline_cpu()
834 WARN_ON(irq_set_affinity(cluster->irq, cpumask_of(target))); in l2cache_pmu_offline_cpu()
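
The offline callback is the mirror image: if the departing CPU was the cluster's reader, ownership either migrates to another online CPU of the same cluster (moving the perf context and IRQ affinity with it) or, when none remains, the interrupt is masked until a CPU of the cluster comes back. Sketch:

    #include <linux/cpumask.h>
    #include <linux/interrupt.h>
    #include <linux/perf_event.h>

    static int l2cache_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
    {
            struct l2cache_pmu *l2cache_pmu =
                    hlist_entry_safe(node, struct l2cache_pmu, node);
            struct cluster_pmu *cluster = get_cluster_pmu(l2cache_pmu, cpu);
            cpumask_t cluster_online_cpus;
            unsigned int target;

            if (!cluster)
                    return 0;

            /* Only the current reader of the cluster has anything to hand over. */
            if (cluster->on_cpu != cpu)
                    return 0;

            cpumask_clear_cpu(cpu, &l2cache_pmu->cpumask);
            cluster->on_cpu = -1;

            /* Pick any other online CPU that shares this L2 cluster. */
            cpumask_and(&cluster_online_cpus, &cluster->cluster_cpus,
                        cpu_online_mask);
            target = cpumask_any_but(&cluster_online_cpus, cpu);
            if (target >= nr_cpu_ids) {
                    disable_irq(cluster->irq);      /* whole cluster is now offline */
                    return 0;
            }

            perf_pmu_migrate_context(&l2cache_pmu->pmu, cpu, target);
            cluster->on_cpu = target;
            cpumask_set_cpu(target, &l2cache_pmu->cpumask);
            WARN_ON(irq_set_affinity(cluster->irq, cpumask_of(target)));
            return 0;
    }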
845 struct cluster_pmu *cluster; in l2_cache_pmu_probe_cluster() local
855 cluster = devm_kzalloc(&pdev->dev, sizeof(*cluster), GFP_KERNEL); in l2_cache_pmu_probe_cluster()
856 if (!cluster) in l2_cache_pmu_probe_cluster()
859 INIT_LIST_HEAD(&cluster->next); in l2_cache_pmu_probe_cluster()
860 list_add(&cluster->next, &l2cache_pmu->clusters); in l2_cache_pmu_probe_cluster()
861 cluster->cluster_id = fw_cluster_id; in l2_cache_pmu_probe_cluster()
866 cluster->irq = irq; in l2_cache_pmu_probe_cluster()
868 cluster->l2cache_pmu = l2cache_pmu; in l2_cache_pmu_probe_cluster()
869 cluster->on_cpu = -1; in l2_cache_pmu_probe_cluster()
874 "l2-cache-pmu", cluster); in l2_cache_pmu_probe_cluster()
884 spin_lock_init(&cluster->pmu_lock); in l2_cache_pmu_probe_cluster()
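
l2_cache_pmu_probe_cluster() allocates one cluster_pmu per firmware-described cluster, links it into the PMU-wide list, and requests the overflow interrupt with the cluster as the handler cookie while keeping it disabled until a CPU adopts the cluster. A condensed sketch; the device_for_each_child-style signature and the firmware read of fw_cluster_id are assumptions:

    #include <linux/device.h>
    #include <linux/interrupt.h>
    #include <linux/irq.h>
    #include <linux/list.h>
    #include <linux/platform_device.h>
    #include <linux/spinlock.h>

    static int l2_cache_pmu_probe_cluster(struct device *dev, void *data)
    {
            struct platform_device *pdev = to_platform_device(dev->parent);
            struct platform_device *sdev = to_platform_device(dev);
            struct l2cache_pmu *l2cache_pmu = data;
            struct cluster_pmu *cluster;
            u64 fw_cluster_id = 0;          /* read from firmware properties; elided */
            int irq, err;

            cluster = devm_kzalloc(&pdev->dev, sizeof(*cluster), GFP_KERNEL);
            if (!cluster)
                    return -ENOMEM;

            INIT_LIST_HEAD(&cluster->next);
            list_add(&cluster->next, &l2cache_pmu->clusters);
            cluster->cluster_id = fw_cluster_id;

            irq = platform_get_irq(sdev, 0);
            if (irq < 0)
                    return irq;
            irq_set_status_flags(irq, IRQ_NOAUTOEN);  /* enabled from the online callback */
            cluster->irq = irq;

            cluster->l2cache_pmu = l2cache_pmu;
            cluster->on_cpu = -1;           /* no reader CPU until hotplug online */

            err = devm_request_irq(&pdev->dev, irq, l2_cache_handle_irq,
                                   IRQF_NOBALANCING | IRQF_NO_THREAD,
                                   "l2-cache-pmu", cluster);
            if (err)
                    return err;

            spin_lock_init(&cluster->pmu_lock);
            return 0;
    }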