Diffstat (limited to 'virt/kvm/arm/pmu.c')
-rw-r--r--  virt/kvm/arm/pmu.c  114
1 file changed, 69 insertions(+), 45 deletions(-)
diff --git a/virt/kvm/arm/pmu.c b/virt/kvm/arm/pmu.c
index 8731dfeced8b..f0d0312c0a55 100644
--- a/virt/kvm/arm/pmu.c
+++ b/virt/kvm/arm/pmu.c
@@ -15,6 +15,8 @@
#include <kvm/arm_vgic.h>
static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx);
+static void kvm_pmu_update_pmc_chained(struct kvm_vcpu *vcpu, u64 select_idx);
+static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc);
#define PERF_ATTR_CFG1_KVM_PMU_CHAINED 0x1
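
PERF_ATTR_CFG1_KVM_PMU_CHAINED is the bit KVM sets in perf_event_attr.config1 when it asks perf for a 64bit (chained) backing event; the arm64 host PMU driver interprets config1 bit 0 that way. A minimal standalone sketch of the flag handling, with struct fake_attr as an illustrative stand-in for struct perf_event_attr:

#include <stdint.h>
#include <stdio.h>

#define PERF_ATTR_CFG1_KVM_PMU_CHAINED	0x1

/* Illustrative stand-in for the config1 field of struct perf_event_attr. */
struct fake_attr {
	uint64_t config1;
};

int main(void)
{
	struct fake_attr attr = { .config1 = 0 };
	int chained = 1;	/* pretend the pair is currently chained */

	/* Request a 64bit event from the host PMU driver when chained. */
	if (chained)
		attr.config1 |= PERF_ATTR_CFG1_KVM_PMU_CHAINED;

	printf("64bit event requested: %d\n",
	       !!(attr.config1 & PERF_ATTR_CFG1_KVM_PMU_CHAINED));
	return 0;
}
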
@@ -75,6 +77,13 @@ static struct kvm_pmc *kvm_pmu_get_canonical_pmc(struct kvm_pmc *pmc)
return pmc;
}
+static struct kvm_pmc *kvm_pmu_get_alternate_pmc(struct kvm_pmc *pmc)
+{
+ if (kvm_pmu_idx_is_high_counter(pmc->idx))
+ return pmc - 1;
+ else
+ return pmc + 1;
+}
/**
* kvm_pmu_idx_has_chain_evtype - determine if the event type is chain
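
The new kvm_pmu_get_alternate_pmc() helper complements kvm_pmu_get_canonical_pmc(): counters are paired {even, odd}, the even (low) one is canonical, and the alternate is simply the other half of the pair. A minimal userspace model of that index arithmetic, using plain indices instead of struct kvm_pmc pointers (pair_low/pair_alternate are illustrative names, not kernel API):

#include <stdbool.h>
#include <stdio.h>

/* Odd-numbered counters are the high half of a chained pair. */
static bool idx_is_high_counter(unsigned int idx)
{
	return idx & 1;
}

/* Canonical (low) counter of the pair: clear bit 0. */
static unsigned int pair_low(unsigned int idx)
{
	return idx & ~1u;
}

/* Alternate counter: the other half of the pair, i.e. idx +/- 1. */
static unsigned int pair_alternate(unsigned int idx)
{
	return idx_is_high_counter(idx) ? idx - 1 : idx + 1;
}

int main(void)
{
	for (unsigned int idx = 0; idx < 4; idx++)
		printf("idx %u: high=%d low=%u alternate=%u\n",
		       idx, idx_is_high_counter(idx),
		       pair_low(idx), pair_alternate(idx));
	return 0;
}
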
@@ -238,10 +247,11 @@ void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu)
*/
void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu)
{
- int i;
+ unsigned long mask = kvm_pmu_valid_counter_mask(vcpu);
struct kvm_pmu *pmu = &vcpu->arch.pmu;
+ int i;
- for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++)
+ for_each_set_bit(i, &mask, 32)
kvm_pmu_stop_counter(vcpu, &pmu->pmc[i]);
bitmap_zero(vcpu->arch.pmu.chained, ARMV8_PMU_MAX_COUNTER_PAIRS);
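
kvm_pmu_vcpu_reset() now iterates only over counters that actually exist on the guest PMU (the implemented event counters plus the cycle counter) rather than all ARMV8_PMU_MAX_COUNTERS indices. A standalone sketch of the same iteration, with valid_counter_mask() as an illustrative stand-in for kvm_pmu_valid_counter_mask() and an open-coded loop in place of for_each_set_bit():

#include <stdio.h>

#define ARMV8_PMU_CYCLE_IDX	31

/*
 * Illustrative stand-in for kvm_pmu_valid_counter_mask(): N event
 * counters (bits 0..N-1) plus the cycle counter (bit 31).
 */
static unsigned long valid_counter_mask(unsigned int nr_event_counters)
{
	return ((1ul << nr_event_counters) - 1) | (1ul << ARMV8_PMU_CYCLE_IDX);
}

int main(void)
{
	unsigned long mask = valid_counter_mask(6);

	/* Open-coded equivalent of for_each_set_bit(i, &mask, 32). */
	for (int i = 0; i < 32; i++) {
		if (!(mask & (1ul << i)))
			continue;
		printf("would stop/reset counter %d\n", i);
	}
	return 0;
}
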
@@ -294,15 +304,9 @@ void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
pmc = &pmu->pmc[i];
- /*
- * For high counters of chained events we must recreate the
- * perf event with the long (64bit) attribute set.
- */
- if (kvm_pmu_pmc_is_chained(pmc) &&
- kvm_pmu_idx_is_high_counter(i)) {
- kvm_pmu_create_perf_event(vcpu, i);
- continue;
- }
+ /* A change in the enable state may affect the chain state */
+ kvm_pmu_update_pmc_chained(vcpu, i);
+ kvm_pmu_create_perf_event(vcpu, i);
/* At this point, pmc must be the canonical */
if (pmc->perf_event) {
@@ -335,15 +339,9 @@ void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
pmc = &pmu->pmc[i];
- /*
- * For high counters of chained events we must recreate the
- * perf event with the long (64bit) attribute unset.
- */
- if (kvm_pmu_pmc_is_chained(pmc) &&
- kvm_pmu_idx_is_high_counter(i)) {
- kvm_pmu_create_perf_event(vcpu, i);
- continue;
- }
+ /* A change in the enable state may affect the chain state */
+ kvm_pmu_update_pmc_chained(vcpu, i);
+ kvm_pmu_create_perf_event(vcpu, i);
/* At this point, pmc must be the canonical */
if (pmc->perf_event)
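
Both the enable and the disable path now simply re-evaluate the chain state and recreate the perf event, because whether a pair is chained depends not only on the CHAIN event being programmed in the odd counter but also on that odd counter being enabled (the predicate is spelled out in kvm_pmu_update_pmc_chained() further down). A toy model of that predicate, with evtype[]/enable_mask as stand-ins for PMEVTYPERn_EL0/PMCNTENSET_EL0 and 0x1e as the architectural CHAIN event number:

#include <stdbool.h>
#include <stdio.h>

#define PMUV3_PERFCTR_CHAIN	0x1e	/* architectural CHAIN event number */

/*
 * A pair is chained only while the odd counter programs the CHAIN event
 * *and* that odd counter is enabled.
 */
static bool pair_is_chained(const unsigned int *evtype,
			    unsigned int enable_mask, unsigned int low_idx)
{
	unsigned int high_idx = low_idx | 1;

	return evtype[high_idx] == PMUV3_PERFCTR_CHAIN &&
	       (enable_mask & (1u << high_idx));
}

int main(void)
{
	unsigned int evtype[2] = { 0x11 /* CPU_CYCLES */, PMUV3_PERFCTR_CHAIN };

	printf("odd counter disabled -> chained=%d\n",
	       pair_is_chained(evtype, 0x1, 0));
	printf("odd counter enabled  -> chained=%d\n",
	       pair_is_chained(evtype, 0x3, 0));
	return 0;
}
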
@@ -480,25 +478,45 @@ static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
*/
void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
{
+ struct kvm_pmu *pmu = &vcpu->arch.pmu;
int i;
- u64 type, enable, reg;
- if (val == 0)
+ if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E))
return;
- enable = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
+ /* Weed out disabled counters */
+ val &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
+
for (i = 0; i < ARMV8_PMU_CYCLE_IDX; i++) {
+ u64 type, reg;
+
if (!(val & BIT(i)))
continue;
- type = __vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i)
- & ARMV8_PMU_EVTYPE_EVENT;
- if ((type == ARMV8_PMUV3_PERFCTR_SW_INCR)
- && (enable & BIT(i))) {
- reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) + 1;
+
+ /* PMSWINC only applies to ... SW_INC! */
+ type = __vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i);
+ type &= ARMV8_PMU_EVTYPE_EVENT;
+ if (type != ARMV8_PMUV3_PERFCTR_SW_INCR)
+ continue;
+
+ /* increment this even SW_INC counter */
+ reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) + 1;
+ reg = lower_32_bits(reg);
+ __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) = reg;
+
+ if (reg) /* no overflow on the low part */
+ continue;
+
+ if (kvm_pmu_pmc_is_chained(&pmu->pmc[i])) {
+ /* increment the high counter */
+ reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i + 1) + 1;
reg = lower_32_bits(reg);
- __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) = reg;
- if (!reg)
- __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i);
+ __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i + 1) = reg;
+ if (!reg) /* mark overflow on the high counter */
+ __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i + 1);
+ } else {
+ /* mark overflow on low counter */
+ __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i);
}
}
}
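
The rewritten kvm_pmu_software_increment() bails out early when PMCR_EL0.E is clear, masks the request with PMCNTENSET_EL0, and then, for each selected SW_INCR counter, increments the low 32bit half and propagates a wrap either into the high counter of a chained pair (with overflow flagged on the high index) or straight into PMOVSSET_EL0 for a lone counter. A small userspace model of that carry logic, with cnt[]/ovsset standing in for PMEVCNTRn_EL0/PMOVSSET_EL0:

#include <stdint.h>
#include <stdio.h>

static uint64_t lower_32_bits(uint64_t v)
{
	return v & 0xffffffffull;
}

/*
 * Increment the low 32bit counter; on wrap, either carry into the high
 * counter (chained) or raise the overflow bit for the low counter.
 */
static void sw_increment(uint64_t *cnt, uint32_t *ovsset, int i, int chained)
{
	cnt[i] = lower_32_bits(cnt[i] + 1);
	if (cnt[i])			/* no overflow on the low part */
		return;

	if (chained) {
		cnt[i + 1] = lower_32_bits(cnt[i + 1] + 1);
		if (!cnt[i + 1])	/* overflow on the high counter */
			*ovsset |= 1u << (i + 1);
	} else {
		*ovsset |= 1u << i;	/* overflow on the low counter */
	}
}

int main(void)
{
	uint64_t cnt[2] = { 0xffffffffull, 0x10 };
	uint32_t ovsset = 0;

	sw_increment(cnt, &ovsset, 0, 1);
	printf("low=%#llx high=%#llx ovsset=%#x\n",
	       (unsigned long long)cnt[0], (unsigned long long)cnt[1], ovsset);
	return 0;
}
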
@@ -510,10 +528,9 @@ void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
*/
void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
{
- u64 mask;
+ unsigned long mask = kvm_pmu_valid_counter_mask(vcpu);
int i;
- mask = kvm_pmu_valid_counter_mask(vcpu);
if (val & ARMV8_PMU_PMCR_E) {
kvm_pmu_enable_counter_mask(vcpu,
__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask);
@@ -525,7 +542,7 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
kvm_pmu_set_counter_value(vcpu, ARMV8_PMU_CYCLE_IDX, 0);
if (val & ARMV8_PMU_PMCR_P) {
- for (i = 0; i < ARMV8_PMU_CYCLE_IDX; i++)
+ for_each_set_bit(i, &mask, 32)
kvm_pmu_set_counter_value(vcpu, i, 0);
}
}
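
For reference, the PMCR_EL0 bits handled above are E (bit 0, global enable), P (bit 1, reset the event counters) and C (bit 2, reset the cycle counter), and the P path now clears only counters present in the valid mask. A tiny decode of a PMCR write along the same lines (the bit positions are architectural; the program itself is just an illustration):

#include <stdio.h>

/* Architectural PMCR_EL0 control bits relevant to this hunk. */
#define PMCR_E	(1u << 0)	/* global enable */
#define PMCR_P	(1u << 1)	/* reset event counters */
#define PMCR_C	(1u << 2)	/* reset cycle counter */

int main(void)
{
	unsigned int val = PMCR_E | PMCR_P;

	printf("enable counters: %d\n", !!(val & PMCR_E));
	printf("reset cycle counter: %d\n", !!(val & PMCR_C));
	printf("reset event counters: %d\n", !!(val & PMCR_P));
	return 0;
}
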
@@ -582,15 +599,14 @@ static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx)
counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);
- if (kvm_pmu_idx_has_chain_evtype(vcpu, pmc->idx)) {
+ if (kvm_pmu_pmc_is_chained(pmc)) {
/**
* The initial sample period (overflow count) of an event. For
* chained counters we only support overflow interrupts on the
* high counter.
*/
attr.sample_period = (-counter) & GENMASK(63, 0);
- if (kvm_pmu_counter_is_enabled(vcpu, pmc->idx + 1))
- attr.config1 |= PERF_ATTR_CFG1_KVM_PMU_CHAINED;
+ attr.config1 |= PERF_ATTR_CFG1_KVM_PMU_CHAINED;
event = perf_event_create_kernel_counter(&attr, -1, current,
kvm_pmu_perf_overflow,
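
The sample period handed to perf is the number of increments left before the counter overflows, i.e. the two's-complement negation of the current value truncated to the counter width: 64 bits here because a chained pair behaves as a single 64bit counter, while a lone 32bit counter would use a 32bit mask. A standalone illustration of that arithmetic, with GENMASK_ULL as a local stand-in for the kernel macro:

#include <stdint.h>
#include <stdio.h>

#define GENMASK_ULL(h, l) \
	(((~0ull) << (l)) & (~0ull >> (63 - (h))))

/*
 * Sample period = "events until overflow": negate the current counter
 * value and truncate to the counter width.
 */
static uint64_t sample_period(uint64_t counter, unsigned int width_hi)
{
	return (0 - counter) & GENMASK_ULL(width_hi, 0);
}

int main(void)
{
	uint64_t counter = 0xfffffff0ull;

	printf("64bit period: %#llx\n",
	       (unsigned long long)sample_period(counter, 63));
	printf("32bit period: %#llx\n",
	       (unsigned long long)sample_period(counter, 31));
	return 0;
}
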
@@ -621,25 +637,33 @@ static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx)
* @select_idx: The number of selected counter
*
* Update the chained bitmap based on the event type written in the
- * typer register.
+ * typer register and the enable state of the odd register.
*/
static void kvm_pmu_update_pmc_chained(struct kvm_vcpu *vcpu, u64 select_idx)
{
struct kvm_pmu *pmu = &vcpu->arch.pmu;
- struct kvm_pmc *pmc = &pmu->pmc[select_idx];
+ struct kvm_pmc *pmc = &pmu->pmc[select_idx], *canonical_pmc;
+ bool new_state, old_state;
+
+ old_state = kvm_pmu_pmc_is_chained(pmc);
+ new_state = kvm_pmu_idx_has_chain_evtype(vcpu, pmc->idx) &&
+ kvm_pmu_counter_is_enabled(vcpu, pmc->idx | 0x1);
+
+ if (old_state == new_state)
+ return;
- if (kvm_pmu_idx_has_chain_evtype(vcpu, pmc->idx)) {
+ canonical_pmc = kvm_pmu_get_canonical_pmc(pmc);
+ kvm_pmu_stop_counter(vcpu, canonical_pmc);
+ if (new_state) {
/*
* During promotion from !chained to chained we must ensure
* the adjacent counter is stopped and its event destroyed
*/
- if (!kvm_pmu_pmc_is_chained(pmc))
- kvm_pmu_stop_counter(vcpu, pmc);
-
+ kvm_pmu_stop_counter(vcpu, kvm_pmu_get_alternate_pmc(pmc));
set_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
- } else {
- clear_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
+ return;
}
+ clear_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
}
/**
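
kvm_pmu_update_pmc_chained() now derives the chain state from both the event type and the enable bit of the odd counter, stops the canonical counter (and, on promotion, the alternate one) before the state changes, and records the result in the per-pair chained bitmap, where both halves of a pair share the bit at idx >> 1. A minimal model of that bitmap indexing, with chained_bitmap as a stand-in for vcpu->arch.pmu.chained:

#include <stdbool.h>
#include <stdio.h>

#define MAX_COUNTERS		32
#define MAX_COUNTER_PAIRS	(MAX_COUNTERS >> 1)

/* One bit per counter pair, indexed by idx >> 1. */
static unsigned long chained_bitmap;

static void set_pair_chained(unsigned int idx, bool chained)
{
	unsigned int bit = idx >> 1;

	if (chained)
		chained_bitmap |= 1ul << bit;
	else
		chained_bitmap &= ~(1ul << bit);
}

static bool pair_chained_bit(unsigned int idx)
{
	return chained_bitmap & (1ul << (idx >> 1));
}

int main(void)
{
	set_pair_chained(3, true);	/* marks the {2,3} pair */
	printf("counter 2 chained: %d\n", pair_chained_bit(2));
	printf("counter 3 chained: %d\n", pair_chained_bit(3));
	printf("counter 4 chained: %d\n", pair_chained_bit(4));
	return 0;
}
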