Lines Matching refs:cpuhw

(Every hit below is a reference to the per-CPU "cpuhw" pointer in the powerpc perf core, apparently arch/powerpc/perf/core-book3s.c. The number opening each hit is its line number in that file, the trailing "in funcname()" names the enclosing function, and the "argument"/"local" tag records how cpuhw is bound there.)

128 static unsigned long ebb_switch_in(bool ebb, struct cpu_hw_events *cpuhw)  in ebb_switch_in()  argument
130 return cpuhw->mmcr.mmcr0; in ebb_switch_in()
136 static inline void power_pmu_bhrb_read(struct perf_event *event, struct cpu_hw_events *cpuhw) {} in power_pmu_bhrb_read() argument
157 struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); in get_pmcs_ext_regs() local
159 return cpuhw->pmcs[idx]; in get_pmcs_ext_regs()
416 struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); in power_pmu_bhrb_enable() local
422 if (event->ctx->task && cpuhw->bhrb_context != event->ctx) { in power_pmu_bhrb_enable()
424 cpuhw->bhrb_context = event->ctx; in power_pmu_bhrb_enable()
426 cpuhw->bhrb_users++; in power_pmu_bhrb_enable()
432 struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); in power_pmu_bhrb_disable() local
437 WARN_ON_ONCE(!cpuhw->bhrb_users); in power_pmu_bhrb_disable()
438 cpuhw->bhrb_users--; in power_pmu_bhrb_disable()
441 if (!cpuhw->disabled && !cpuhw->bhrb_users) { in power_pmu_bhrb_disable()
447 cpuhw->bhrb_context = NULL; in power_pmu_bhrb_disable()
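
Lines 416-447 show the BHRB (Branch History Rolling Buffer) user accounting: enabling bumps a per-CPU refcount and resets the buffer on a task-context change, while disabling decrements the count and drops the context once the last user is gone. A minimal userspace sketch of that discipline, with hypothetical types standing in for the kernel's per-CPU state and reset hook:

#include <assert.h>
#include <stddef.h>

/* Hypothetical model of the per-CPU BHRB bookkeeping. */
struct bhrb_state {
    unsigned int users;    /* active events that want BHRB data            */
    void *context;         /* task context the buffer currently belongs to */
    int disabled;          /* PMU globally frozen?                         */
};

static void bhrb_reset(void) { /* would clear the hardware buffer */ }

static void bhrb_enable(struct bhrb_state *s, void *task_ctx)
{
    /* Clear the buffer when the task context changes, so one task's
     * branch records never leak into another task's samples. */
    if (task_ctx && s->context != task_ctx) {
        bhrb_reset();
        s->context = task_ctx;
    }
    s->users++;
}

static void bhrb_disable(struct bhrb_state *s)
{
    assert(s->users > 0);            /* mirrors WARN_ON_ONCE(!bhrb_users) */
    s->users--;

    /* Only the last user, with the PMU live, may drop the context. */
    if (!s->disabled && !s->users)
        s->context = NULL;
}
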
490 static void power_pmu_bhrb_read(struct perf_event *event, struct cpu_hw_events *cpuhw) in power_pmu_bhrb_read() argument
546 cpuhw->bhrb_entries[u_index].to = addr; in power_pmu_bhrb_read()
547 cpuhw->bhrb_entries[u_index].mispred = pred; in power_pmu_bhrb_read()
548 cpuhw->bhrb_entries[u_index].predicted = ~pred; in power_pmu_bhrb_read()
559 cpuhw->bhrb_entries[u_index].from = addr; in power_pmu_bhrb_read()
563 cpuhw->bhrb_entries[u_index].from = addr; in power_pmu_bhrb_read()
564 cpuhw->bhrb_entries[u_index].to = in power_pmu_bhrb_read()
566 cpuhw->bhrb_entries[u_index].mispred = pred; in power_pmu_bhrb_read()
567 cpuhw->bhrb_entries[u_index].predicted = ~pred; in power_pmu_bhrb_read()
573 cpuhw->bhrb_stack.nr = u_index; in power_pmu_bhrb_read()
574 cpuhw->bhrb_stack.hw_idx = -1ULL; in power_pmu_bhrb_read()
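
Lines 490-574 fill cpuhw->bhrb_entries[] while decoding raw BHRB records: a record flagged as a target supplies the 'to' address of the entry being built, an ordinary record supplies the 'from' address, and each entry stores the misprediction bit alongside its complement in 'predicted'. A simplified sketch of that shape; the raw-record encoding here is invented for illustration and is not the hardware format:

#include <stdint.h>

#define MAX_BHRB 32

/* Simplified branch entry, modeled on the fields used in the source. */
struct branch_entry {
    uint64_t from, to;
    unsigned mispred:1, predicted:1;
};

struct bhrb_model {
    struct branch_entry entries[MAX_BHRB];
    unsigned nr;
    uint64_t hw_idx;
};

/* Invented encoding: bit 0 set means "this record carries a target
 * address", bit 1 is the misprediction bit, upper bits are the address. */
static void bhrb_fill(struct bhrb_model *b, const uint64_t *raw, unsigned n)
{
    unsigned u = 0;

    for (unsigned i = 0; i < n && u < MAX_BHRB; i++) {
        uint64_t addr = raw[i] & ~3ULL;
        unsigned pred = (raw[i] >> 1) & 1;

        if (raw[i] & 1) {
            /* Target record: supplies the 'to' side of the current entry. */
            b->entries[u].to        = addr;
            b->entries[u].mispred   = pred;
            b->entries[u].predicted = !pred;  /* complement, as in the source */
        } else {
            /* From record: completes the entry and moves on. */
            b->entries[u].from = addr;
            u++;
        }
    }
    b->nr     = u;       /* mirrors cpuhw->bhrb_stack.nr = u_index    */
    b->hw_idx = -1ULL;   /* mirrors cpuhw->bhrb_stack.hw_idx = -1ULL  */
}
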
647 static unsigned long ebb_switch_in(bool ebb, struct cpu_hw_events *cpuhw) in ebb_switch_in() argument
649 unsigned long mmcr0 = cpuhw->mmcr.mmcr0; in ebb_switch_in()
683 mtspr(SPRN_MMCR2, cpuhw->mmcr.mmcr2 | current->thread.mmcr2); in ebb_switch_in()
795 struct cpu_hw_events *cpuhw; in power_pmu_wants_prompt_pmi() local
804 cpuhw = this_cpu_ptr(&cpu_hw_events); in power_pmu_wants_prompt_pmi()
805 return cpuhw->n_events; in power_pmu_wants_prompt_pmi()
889 static int any_pmc_overflown(struct cpu_hw_events *cpuhw) in any_pmc_overflown() argument
893 for (i = 0; i < cpuhw->n_events; i++) { in any_pmc_overflown()
894 idx = cpuhw->event[i]->hw.idx; in any_pmc_overflown()
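
any_pmc_overflown() (lines 889-894) walks the active events and tests the saved value of each event's PMC. On book3s a counter has overflowed when its 32-bit value has gone negative, which is what the kernel's pmc_overflow() checks; a compact model:

#include <stdint.h>

#define MAX_HWEVENTS 8

struct hw_model {
    int      n_events;
    int      idx[MAX_HWEVENTS];     /* 1-based PMC index per event */
    uint32_t pmcs[MAX_HWEVENTS];    /* saved counter values        */
};

/* A PMC has overflowed when its value, read as signed, is negative. */
static int pmc_overflow(uint32_t val)
{
    return (int32_t)val < 0;
}

static int any_pmc_overflown(const struct hw_model *hw)
{
    for (int i = 0; i < hw->n_events; i++) {
        int idx = hw->idx[i];
        if (idx && pmc_overflow(hw->pmcs[idx - 1]))
            return 1;
    }
    return 0;
}
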
969 static int power_check_constraints(struct cpu_hw_events *cpuhw, in power_check_constraints() argument
990 cpuhw->alternatives[i]); in power_check_constraints()
991 event_id[i] = cpuhw->alternatives[i][0]; in power_check_constraints()
993 if (ppmu->get_constraint(event_id[i], &cpuhw->amasks[i][0], in power_check_constraints()
994 &cpuhw->avalues[i][0], event[i]->attr.config1)) in power_check_constraints()
999 nv = (value | cpuhw->avalues[i][0]) + in power_check_constraints()
1000 (value & cpuhw->avalues[i][0] & addf); in power_check_constraints()
1005 if (((((nv + tadd) ^ cpuhw->avalues[i][0]) & cpuhw->amasks[i][0]) in power_check_constraints()
1010 mask |= cpuhw->amasks[i][0]; in power_check_constraints()
1025 cpuhw->alternatives[i]); in power_check_constraints()
1027 ppmu->get_constraint(cpuhw->alternatives[i][j], in power_check_constraints()
1028 &cpuhw->amasks[i][j], in power_check_constraints()
1029 &cpuhw->avalues[i][j], in power_check_constraints()
1049 nv = (value | cpuhw->avalues[i][j]) + in power_check_constraints()
1050 (value & cpuhw->avalues[i][j] & addf); in power_check_constraints()
1052 (((nv + tadd) ^ cpuhw->avalues[i][j]) in power_check_constraints()
1053 & cpuhw->amasks[i][j]) == 0) in power_check_constraints()
1075 mask |= cpuhw->amasks[i][j]; in power_check_constraints()
1083 event_id[i] = cpuhw->alternatives[i][choice[i]]; in power_check_constraints()
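
power_check_constraints() (lines 969-1083) is the scheduling core. Each event contributes a (mask, value) constraint pair: inside "add fields" (addf) the values of co-scheduled events are summed, so a field can count consumers of a shared resource, while the remaining fields are merged by OR and must not conflict. The expression nv = (value | avalue) + (value & avalue & addf) does both at once, because (x | y) + (x & y) == x + y; masking the second term with addf confines the carry correction to the add fields. A self-contained demonstration with an invented two-field layout:

#include <stdint.h>
#include <stdio.h>

/* Invented layout: bits 0-3 form one add field that counts how many
 * events need some shared unit; bits 8-15 are an ordinary field whose
 * value must agree across all scheduled events. */
#define ADDF 0x000000000000000fULL   /* bits belonging to add fields */

struct constraint { uint64_t mask, value; };

/* Accumulate one event's constraint into the running value, using the
 * identity (x | y) + (x & y) == x + y, with the carry-correction term
 * restricted to add fields exactly as in the source. */
static uint64_t accumulate(uint64_t value, const struct constraint *c)
{
    return (value | c->value) + (value & c->value & ADDF);
}

int main(void)
{
    struct constraint a = { .mask = 0xff0f, .value = 0x0301 }; /* field=3, +1 */
    struct constraint b = { .mask = 0xff0f, .value = 0x0301 }; /* field=3, +1 */

    uint64_t v = 0;
    v = accumulate(v, &a);
    v = accumulate(v, &b);

    /* The add field now holds 2; the 0x03 field merged without conflict. */
    printf("accumulated = 0x%llx\n", (unsigned long long)v); /* prints 0x302 */
    return 0;
}

The source's comparison (((nv + tadd) ^ avalues[i][j]) & amasks[i][j]) == 0 then adds the per-PMU test_adder (tadd) so that a single XOR-and-mask test catches both an overflowing add field and a conflicting ordinary field.
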
1217 static void freeze_limited_counters(struct cpu_hw_events *cpuhw, in freeze_limited_counters() argument
1224 for (i = 0; i < cpuhw->n_limited; ++i) { in freeze_limited_counters()
1225 event = cpuhw->limited_counter[i]; in freeze_limited_counters()
1237 static void thaw_limited_counters(struct cpu_hw_events *cpuhw, in thaw_limited_counters() argument
1244 for (i = 0; i < cpuhw->n_limited; ++i) { in thaw_limited_counters()
1245 event = cpuhw->limited_counter[i]; in thaw_limited_counters()
1246 event->hw.idx = cpuhw->limited_hwidx[i]; in thaw_limited_counters()
1266 static void write_mmcr0(struct cpu_hw_events *cpuhw, unsigned long mmcr0) in write_mmcr0() argument
1270 if (!cpuhw->n_limited) { in write_mmcr0()
1289 freeze_limited_counters(cpuhw, pmc5, pmc6); in write_mmcr0()
1291 thaw_limited_counters(cpuhw, pmc5, pmc6); in write_mmcr0()
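
write_mmcr0() (lines 1266-1291) special-cases "limited" counters (PMC5/6 on some PMUs), which are frozen and thawed only by MMCR0_FC: when the write freezes the PMU it captures their values (the source folds the delta into each event's count), and when it unfreezes it writes the saved values back. A sketch of that pairing with the register access stubbed out:

#include <stdint.h>

#define MAX_LIMITED 2
#define MMCR0_FC 0x80000000u   /* freeze-counters bit, as on book3s */

struct limited_model {
    int      n_limited;
    uint32_t saved[MAX_LIMITED];   /* values captured while frozen */
};

/* Stubs standing in for mfspr/mtspr on PMC5/PMC6. */
static uint32_t read_pmc56(int i)              { (void)i; return 0; }
static void     write_pmc56(int i, uint32_t v) { (void)i; (void)v;  }

static void write_mmcr0_model(struct limited_model *lm, uint32_t mmcr0)
{
    if (lm->n_limited == 0)
        return;                 /* nothing special; just write MMCR0 */

    if (mmcr0 & MMCR0_FC) {
        /* Freezing: capture the limited counters before they stop. */
        for (int i = 0; i < lm->n_limited; i++)
            lm->saved[i] = read_pmc56(i);
    } else {
        /* Unfreezing: restore the captured values before counting resumes. */
        for (int i = 0; i < lm->n_limited; i++)
            write_pmc56(i, lm->saved[i]);
    }
}
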
1307 struct cpu_hw_events *cpuhw; in power_pmu_disable() local
1313 cpuhw = this_cpu_ptr(&cpu_hw_events); in power_pmu_disable()
1315 if (!cpuhw->disabled) { in power_pmu_disable()
1319 if (!cpuhw->pmcs_enabled) { in power_pmu_disable()
1321 cpuhw->pmcs_enabled = 1; in power_pmu_disable()
1342 write_mmcr0(cpuhw, val); in power_pmu_disable()
1369 val = mmcra = cpuhw->mmcr.mmcra; in power_pmu_disable()
1374 if (cpuhw->mmcr.mmcra & MMCRA_SAMPLE_ENABLE) in power_pmu_disable()
1391 cpuhw->disabled = 1; in power_pmu_disable()
1392 cpuhw->n_added = 0; in power_pmu_disable()
1421 struct cpu_hw_events *cpuhw; in power_pmu_enable() local
1435 cpuhw = this_cpu_ptr(&cpu_hw_events); in power_pmu_enable()
1436 if (!cpuhw->disabled) in power_pmu_enable()
1439 if (cpuhw->n_events == 0) { in power_pmu_enable()
1444 cpuhw->disabled = 0; in power_pmu_enable()
1451 ebb = is_ebb_event(cpuhw->event[0]); in power_pmu_enable()
1459 if (!cpuhw->n_added) { in power_pmu_enable()
1466 if (any_pmc_overflown(cpuhw)) in power_pmu_enable()
1469 mtspr(SPRN_MMCRA, cpuhw->mmcr.mmcra & ~MMCRA_SAMPLE_ENABLE); in power_pmu_enable()
1470 mtspr(SPRN_MMCR1, cpuhw->mmcr.mmcr1); in power_pmu_enable()
1472 mtspr(SPRN_MMCR3, cpuhw->mmcr.mmcr3); in power_pmu_enable()
1479 memset(&cpuhw->mmcr, 0, sizeof(cpuhw->mmcr)); in power_pmu_enable()
1481 if (ppmu->compute_mmcr(cpuhw->events, cpuhw->n_events, hwc_index, in power_pmu_enable()
1482 &cpuhw->mmcr, cpuhw->event, ppmu->flags)) { in power_pmu_enable()
1494 event = cpuhw->event[0]; in power_pmu_enable()
1496 cpuhw->mmcr.mmcr0 |= MMCR0_FCP; in power_pmu_enable()
1498 cpuhw->mmcr.mmcr0 |= freeze_events_kernel; in power_pmu_enable()
1500 cpuhw->mmcr.mmcr0 |= MMCR0_FCHV; in power_pmu_enable()
1509 mtspr(SPRN_MMCRA, cpuhw->mmcr.mmcra & ~MMCRA_SAMPLE_ENABLE); in power_pmu_enable()
1510 mtspr(SPRN_MMCR1, cpuhw->mmcr.mmcr1); in power_pmu_enable()
1511 mtspr(SPRN_MMCR0, (cpuhw->mmcr.mmcr0 & ~(MMCR0_PMC1CE | MMCR0_PMCjCE)) in power_pmu_enable()
1514 mtspr(SPRN_MMCR2, cpuhw->mmcr.mmcr2); in power_pmu_enable()
1517 mtspr(SPRN_MMCR3, cpuhw->mmcr.mmcr3); in power_pmu_enable()
1523 for (i = 0; i < cpuhw->n_events; ++i) { in power_pmu_enable()
1524 event = cpuhw->event[i]; in power_pmu_enable()
1535 cpuhw->n_limited = n_lim = 0; in power_pmu_enable()
1536 for (i = 0; i < cpuhw->n_events; ++i) { in power_pmu_enable()
1537 event = cpuhw->event[i]; in power_pmu_enable()
1542 cpuhw->limited_counter[n_lim] = event; in power_pmu_enable()
1543 cpuhw->limited_hwidx[n_lim] = idx; in power_pmu_enable()
1567 cpuhw->n_limited = n_lim; in power_pmu_enable()
1568 cpuhw->mmcr.mmcr0 |= MMCR0_PMXE | MMCR0_FCECE; in power_pmu_enable()
1573 mmcr0 = ebb_switch_in(ebb, cpuhw); in power_pmu_enable()
1576 if (cpuhw->bhrb_users) in power_pmu_enable()
1577 ppmu->config_bhrb(cpuhw->bhrb_filter); in power_pmu_enable()
1579 write_mmcr0(cpuhw, mmcr0); in power_pmu_enable()
1584 if (cpuhw->mmcr.mmcra & MMCRA_SAMPLE_ENABLE) { in power_pmu_enable()
1586 mtspr(SPRN_MMCRA, cpuhw->mmcr.mmcra); in power_pmu_enable()
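
power_pmu_enable() (lines 1421-1586) splits into a fast and a slow path: if no event was added while the PMU was disabled (n_added == 0) the previously computed register image is still valid and is simply rewritten, otherwise ppmu->compute_mmcr() rebuilds it and every register is reprogrammed. Note that n_added is cleared by the disable path (lines 1391-1392), not here. The control flow, compressed into a sketch with hypothetical helpers:

/* compute_mmcrs() and write_regs() stand in for ppmu->compute_mmcr()
 * and the mtspr sequence in the source. */
struct pmu_model {
    int disabled;
    int n_events;
    int n_added;     /* events added since the PMU was last disabled */
};

static int  compute_mmcrs(struct pmu_model *p) { (void)p; return 0; }
static void write_regs(struct pmu_model *p)    { (void)p; }

static void pmu_enable(struct pmu_model *p)
{
    if (!p->disabled)
        return;              /* mirrors the early-out on !cpuhw->disabled */

    p->disabled = 0;
    if (p->n_events == 0)
        return;              /* nothing scheduled; leave counters frozen */

    if (p->n_added == 0) {
        /* Fast path: the register image is still valid, just rewrite it. */
        write_regs(p);
        return;
    }

    /* Slow path: recompute the MMCR image for the current event set,
     * then reprogram every register. */
    if (compute_mmcrs(p) == 0)
        write_regs(p);
}
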
1629 struct cpu_hw_events *cpuhw; in power_pmu_add() local
1641 cpuhw = this_cpu_ptr(&cpu_hw_events); in power_pmu_add()
1642 n0 = cpuhw->n_events; in power_pmu_add()
1645 cpuhw->event[n0] = event; in power_pmu_add()
1646 cpuhw->events[n0] = event->hw.config; in power_pmu_add()
1647 cpuhw->flags[n0] = event->hw.event_base; in power_pmu_add()
1665 if (cpuhw->txn_flags & PERF_PMU_TXN_ADD) in power_pmu_add()
1668 if (check_excludes(cpuhw->event, cpuhw->flags, n0, 1)) in power_pmu_add()
1670 if (power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n0 + 1, cpuhw->event)) in power_pmu_add()
1672 event->hw.config = cpuhw->events[n0]; in power_pmu_add()
1677 ++cpuhw->n_events; in power_pmu_add()
1678 ++cpuhw->n_added; in power_pmu_add()
1690 cpuhw->bhrb_filter = bhrb_filter; in power_pmu_add()
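
power_pmu_add() (lines 1629-1690) appends the new event to three parallel per-CPU arrays and then validates the enlarged set, unless a PERF_PMU_TXN_ADD transaction is open, in which case the exclude and constraint checks are deferred to commit time. The append-then-validate shape, with constraints_ok() standing in for check_excludes() plus power_check_constraints():

#define MAX_HWEVENTS 8

struct sched_model {
    int   n_events;
    int   n_added;
    int   txn_open;                       /* inside a TXN_ADD transaction? */
    void *event[MAX_HWEVENTS];
    unsigned long events[MAX_HWEVENTS];   /* raw config per event          */
    unsigned int  flags[MAX_HWEVENTS];
};

static int constraints_ok(struct sched_model *s, int n) { (void)s; (void)n; return 1; }

static int pmu_add(struct sched_model *s, void *ev,
                   unsigned long config, unsigned int flags)
{
    int n0 = s->n_events;

    if (n0 >= MAX_HWEVENTS)
        return -1;

    /* Tentatively append to all three parallel arrays. */
    s->event[n0]  = ev;
    s->events[n0] = config;
    s->flags[n0]  = flags;

    /* Under a TXN_ADD transaction the check is deferred to commit. */
    if (!s->txn_open && !constraints_ok(s, n0 + 1))
        return -1;              /* slot n0 is simply never counted */

    s->n_events++;
    s->n_added++;
    return 0;
}
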
1705 struct cpu_hw_events *cpuhw; in power_pmu_del() local
1714 cpuhw = this_cpu_ptr(&cpu_hw_events); in power_pmu_del()
1715 for (i = 0; i < cpuhw->n_events; ++i) { in power_pmu_del()
1716 if (event == cpuhw->event[i]) { in power_pmu_del()
1717 while (++i < cpuhw->n_events) { in power_pmu_del()
1718 cpuhw->event[i-1] = cpuhw->event[i]; in power_pmu_del()
1719 cpuhw->events[i-1] = cpuhw->events[i]; in power_pmu_del()
1720 cpuhw->flags[i-1] = cpuhw->flags[i]; in power_pmu_del()
1722 --cpuhw->n_events; in power_pmu_del()
1723 ppmu->disable_pmc(event->hw.idx - 1, &cpuhw->mmcr); in power_pmu_del()
1732 for (i = 0; i < cpuhw->n_limited; ++i) in power_pmu_del()
1733 if (event == cpuhw->limited_counter[i]) in power_pmu_del()
1735 if (i < cpuhw->n_limited) { in power_pmu_del()
1736 while (++i < cpuhw->n_limited) { in power_pmu_del()
1737 cpuhw->limited_counter[i-1] = cpuhw->limited_counter[i]; in power_pmu_del()
1738 cpuhw->limited_hwidx[i-1] = cpuhw->limited_hwidx[i]; in power_pmu_del()
1740 --cpuhw->n_limited; in power_pmu_del()
1742 if (cpuhw->n_events == 0) { in power_pmu_del()
1744 cpuhw->mmcr.mmcr0 &= ~(MMCR0_PMXE | MMCR0_FCECE); in power_pmu_del()
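
power_pmu_del() (lines 1705-1746) removes an event by shifting the tails of the parallel arrays down one slot, repeats the idiom for the limited-counter arrays, and once the last event is gone clears MMCR0_PMXE | MMCR0_FCECE so the stale register image cannot re-arm interrupts. The deletion idiom in isolation:

#define MAX_HWEVENTS 8

struct del_model {
    int   n_events;
    void *event[MAX_HWEVENTS];
    unsigned long events[MAX_HWEVENTS];
    unsigned int  flags[MAX_HWEVENTS];
};

/* Remove ev by shifting everything after it down one slot, keeping the
 * three parallel arrays in step - the same loop shape as the source. */
static void pmu_del(struct del_model *s, void *ev)
{
    for (int i = 0; i < s->n_events; ++i) {
        if (s->event[i] != ev)
            continue;
        while (++i < s->n_events) {
            s->event[i - 1]  = s->event[i];
            s->events[i - 1] = s->events[i];
            s->flags[i - 1]  = s->flags[i];
        }
        --s->n_events;
        break;
    }
}
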
1824 struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); in power_pmu_start_txn() local
1826 WARN_ON_ONCE(cpuhw->txn_flags); /* txn already in flight */ in power_pmu_start_txn()
1828 cpuhw->txn_flags = txn_flags; in power_pmu_start_txn()
1833 cpuhw->n_txn_start = cpuhw->n_events; in power_pmu_start_txn()
1843 struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); in power_pmu_cancel_txn() local
1846 WARN_ON_ONCE(!cpuhw->txn_flags); /* no txn in flight */ in power_pmu_cancel_txn()
1848 txn_flags = cpuhw->txn_flags; in power_pmu_cancel_txn()
1849 cpuhw->txn_flags = 0; in power_pmu_cancel_txn()
1863 struct cpu_hw_events *cpuhw; in power_pmu_commit_txn() local
1869 cpuhw = this_cpu_ptr(&cpu_hw_events); in power_pmu_commit_txn()
1870 WARN_ON_ONCE(!cpuhw->txn_flags); /* no txn in flight */ in power_pmu_commit_txn()
1872 if (cpuhw->txn_flags & ~PERF_PMU_TXN_ADD) { in power_pmu_commit_txn()
1873 cpuhw->txn_flags = 0; in power_pmu_commit_txn()
1877 n = cpuhw->n_events; in power_pmu_commit_txn()
1878 if (check_excludes(cpuhw->event, cpuhw->flags, 0, n)) in power_pmu_commit_txn()
1880 i = power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n, cpuhw->event); in power_pmu_commit_txn()
1884 for (i = cpuhw->n_txn_start; i < n; ++i) in power_pmu_commit_txn()
1885 cpuhw->event[i]->hw.config = cpuhw->events[i]; in power_pmu_commit_txn()
1887 cpuhw->txn_flags = 0; in power_pmu_commit_txn()
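
Lines 1824-1887 implement the three-call transaction protocol: start_txn records the flags and, for PERF_PMU_TXN_ADD, remembers n_events as a rollback point in n_txn_start; cancel_txn just clears the flags; commit_txn runs the deferred checks over the whole set and publishes the updated configs on success. A sketch of the state machine, with check_all() standing in for the combined exclude/constraint validation:

#include <assert.h>

#define PERF_PMU_TXN_ADD 0x1

struct txn_model {
    unsigned int txn_flags;
    int n_events;
    int n_txn_start;      /* n_events when the transaction opened */
};

static void txn_start(struct txn_model *s, unsigned int flags)
{
    assert(!s->txn_flags);        /* mirrors WARN_ON_ONCE: txn already in flight */
    s->txn_flags = flags;
    if (flags & PERF_PMU_TXN_ADD)
        s->n_txn_start = s->n_events;   /* rollback point for commit */
}

static void txn_cancel(struct txn_model *s)
{
    assert(s->txn_flags);         /* mirrors WARN_ON_ONCE: no txn in flight */
    s->txn_flags = 0;
}

static int check_all(struct txn_model *s) { (void)s; return 0; }

static int txn_commit(struct txn_model *s)
{
    assert(s->txn_flags);
    if (!(s->txn_flags & PERF_PMU_TXN_ADD)) {
        s->txn_flags = 0;         /* nothing to validate for other txn types */
        return 0;
    }
    int err = check_all(s);
    s->txn_flags = 0;
    return err;   /* configs from n_txn_start..n_events published on success */
}
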
2013 struct cpu_hw_events *cpuhw; in power_pmu_event_init() local
2127 cpuhw = this_cpu_ptr(&cpu_hw_events); in power_pmu_event_init()
2129 err = power_check_constraints(cpuhw, events, cflags, n + 1, ctrs); in power_pmu_event_init()
2142 cpuhw->bhrb_filter = bhrb_filter; in power_pmu_event_init()
2296 struct cpu_hw_events *cpuhw; in record_and_restart() local
2297 cpuhw = this_cpu_ptr(&cpu_hw_events); in record_and_restart()
2298 power_pmu_bhrb_read(event, cpuhw); in record_and_restart()
2299 data.br_stack = &cpuhw->bhrb_stack; in record_and_restart()
2380 struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); in __perf_event_interrupt() local
2384 if (cpuhw->n_limited) in __perf_event_interrupt()
2385 freeze_limited_counters(cpuhw, mfspr(SPRN_PMC5), in __perf_event_interrupt()
2392 cpuhw->pmcs[i] = read_pmc(i + 1); in __perf_event_interrupt()
2397 if (!pmc_overflow(cpuhw->pmcs[i])) in __perf_event_interrupt()
2408 for (j = 0; j < cpuhw->n_events; ++j) { in __perf_event_interrupt()
2409 event = cpuhw->event[j]; in __perf_event_interrupt()
2412 record_and_restart(event, cpuhw->pmcs[i], regs); in __perf_event_interrupt()
2430 for (i = 0; i < cpuhw->n_events; ++i) { in __perf_event_interrupt()
2431 event = cpuhw->event[i]; in __perf_event_interrupt()
2434 if (pmc_overflow_power7(cpuhw->pmcs[event->hw.idx - 1])) { in __perf_event_interrupt()
2438 cpuhw->pmcs[event->hw.idx - 1], in __perf_event_interrupt()
2460 write_mmcr0(cpuhw, cpuhw->mmcr.mmcr0); in __perf_event_interrupt()
2463 memset(&cpuhw->pmcs, 0, sizeof(cpuhw->pmcs)); in __perf_event_interrupt()
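
__perf_event_interrupt() (lines 2380-2463) freezes any limited counters, snapshots every PMC into cpuhw->pmcs[], matches each overflowed counter back to the event that owns it, hands the value to record_and_restart(), then rewrites MMCR0 via write_mmcr0() and wipes the snapshot. The matching loop, reduced to a sketch:

#include <stdint.h>
#include <string.h>

#define NUM_PMC      6
#define MAX_HWEVENTS 8

struct irq_model {
    int      n_events;
    int      idx[MAX_HWEVENTS];     /* 1-based PMC index per event */
    uint32_t pmcs[NUM_PMC];         /* snapshot taken at interrupt */
};

static uint32_t read_pmc_model(int pmc)  { (void)pmc; return 0; }
static void record_and_restart_model(int ev, uint32_t val) { (void)ev; (void)val; }

static void pmu_interrupt(struct irq_model *s)
{
    /* 1. Snapshot all counters once, up front. */
    for (int i = 0; i < NUM_PMC; i++)
        s->pmcs[i] = read_pmc_model(i + 1);

    /* 2. For each overflowed PMC, find the event that owns it. */
    for (int i = 0; i < NUM_PMC; i++) {
        if (!((int32_t)s->pmcs[i] < 0))      /* pmc_overflow() */
            continue;
        for (int j = 0; j < s->n_events; j++) {
            if (s->idx[j] == i + 1) {
                record_and_restart_model(j, s->pmcs[i]);
                break;
            }
        }
    }

    /* 3. Clear the snapshot so stale values are never reused. */
    memset(s->pmcs, 0, sizeof(s->pmcs));
}
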
2477 struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu); in power_pmu_prepare_cpu() local
2480 memset(cpuhw, 0, sizeof(*cpuhw)); in power_pmu_prepare_cpu()
2481 cpuhw->mmcr.mmcr0 = MMCR0_FC; in power_pmu_prepare_cpu()