@@ -24,6 +24,7 @@ static DEFINE_MUTEX(arm_pmus_lock);
 
 static void kvm_pmu_create_perf_event(struct kvm_pmc *pmc);
 static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc);
+static bool kvm_pmu_counter_is_enabled(struct kvm_pmc *pmc);
 
 static struct kvm_vcpu *kvm_pmc_to_vcpu(const struct kvm_pmc *pmc)
 {
@@ -327,65 +328,44 @@ u64 kvm_pmu_implemented_counter_mask(struct kvm_vcpu *vcpu)
 		return GENMASK(val - 1, 0) | BIT(ARMV8_PMU_CYCLE_IDX);
 }
 
-/**
- * kvm_pmu_enable_counter_mask - enable selected PMU counters
- * @vcpu: The vcpu pointer
- * @val: the value guest writes to PMCNTENSET register
- *
- * Call perf_event_enable to start counting the perf event
- */
-void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
+static void kvm_pmc_enable_perf_event(struct kvm_pmc *pmc)
 {
-	int i;
-	if (!kvm_vcpu_has_pmu(vcpu))
-		return;
-
-	if (!(kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E) || !val)
+	if (!pmc->perf_event) {
+		kvm_pmu_create_perf_event(pmc);
 		return;
+	}
 
-	for (i = 0; i < KVM_ARMV8_PMU_MAX_COUNTERS; i++) {
-		struct kvm_pmc *pmc;
-
-		if (!(val & BIT(i)))
-			continue;
-
-		pmc = kvm_vcpu_idx_to_pmc(vcpu, i);
+	perf_event_enable(pmc->perf_event);
+	if (pmc->perf_event->state != PERF_EVENT_STATE_ACTIVE)
+		kvm_debug("fail to enable perf event\n");
+}
 
-		if (!pmc->perf_event) {
-			kvm_pmu_create_perf_event(pmc);
-		} else {
-			perf_event_enable(pmc->perf_event);
-			if (pmc->perf_event->state != PERF_EVENT_STATE_ACTIVE)
-				kvm_debug("fail to enable perf event\n");
-		}
-	}
+static void kvm_pmc_disable_perf_event(struct kvm_pmc *pmc)
+{
+	if (pmc->perf_event)
+		perf_event_disable(pmc->perf_event);
 }
 
-/**
- * kvm_pmu_disable_counter_mask - disable selected PMU counters
- * @vcpu: The vcpu pointer
- * @val: the value guest writes to PMCNTENCLR register
- *
- * Call perf_event_disable to stop counting the perf event
- */
-void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
+void kvm_pmu_reprogram_counter_mask(struct kvm_vcpu *vcpu, u64 val)
 {
 	int i;
 
 	if (!kvm_vcpu_has_pmu(vcpu) || !val)
 		return;
 
 	for (i = 0; i < KVM_ARMV8_PMU_MAX_COUNTERS; i++) {
-		struct kvm_pmc *pmc;
+		struct kvm_pmc *pmc = kvm_vcpu_idx_to_pmc(vcpu, i);
 
 		if (!(val & BIT(i)))
 			continue;
 
-		pmc = kvm_vcpu_idx_to_pmc(vcpu, i);
-
-		if (pmc->perf_event)
-			perf_event_disable(pmc->perf_event);
+		if (kvm_pmu_counter_is_enabled(pmc))
+			kvm_pmc_enable_perf_event(pmc);
+		else
+			kvm_pmc_disable_perf_event(pmc);
 	}
+
+	kvm_vcpu_pmu_restore_guest(vcpu);
 }
 
 /*
@@ -626,27 +606,28 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
 	if (!kvm_has_feat(vcpu->kvm, ID_AA64DFR0_EL1, PMUVer, V3P5))
 		val &= ~ARMV8_PMU_PMCR_LP;
 
+	/* Request a reload of the PMU to enable/disable affected counters */
+	if ((__vcpu_sys_reg(vcpu, PMCR_EL0) ^ val) & ARMV8_PMU_PMCR_E)
+		kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
+
 	/* The reset bits don't indicate any state, and shouldn't be saved. */
 	__vcpu_sys_reg(vcpu, PMCR_EL0) = val & ~(ARMV8_PMU_PMCR_C | ARMV8_PMU_PMCR_P);
 
-	if (val & ARMV8_PMU_PMCR_E) {
-		kvm_pmu_enable_counter_mask(vcpu,
-			__vcpu_sys_reg(vcpu, PMCNTENSET_EL0));
-	} else {
-		kvm_pmu_disable_counter_mask(vcpu,
-			__vcpu_sys_reg(vcpu, PMCNTENSET_EL0));
-	}
-
 	if (val & ARMV8_PMU_PMCR_C)
 		kvm_pmu_set_counter_value(vcpu, ARMV8_PMU_CYCLE_IDX, 0);
 
 	if (val & ARMV8_PMU_PMCR_P) {
-		unsigned long mask = kvm_pmu_accessible_counter_mask(vcpu);
-		mask &= ~BIT(ARMV8_PMU_CYCLE_IDX);
+		/*
+		 * Unlike other PMU sysregs, the controls in PMCR_EL0 always apply
+		 * to the 'guest' range of counters and never the 'hyp' range.
+		 */
+		unsigned long mask = kvm_pmu_implemented_counter_mask(vcpu) &
+				     ~kvm_pmu_hyp_counter_mask(vcpu) &
				     ~BIT(ARMV8_PMU_CYCLE_IDX);
+
 		for_each_set_bit(i, &mask, 32)
 			kvm_pmu_set_pmc_value(kvm_vcpu_idx_to_pmc(vcpu, i), 0, true);
 	}
-	kvm_vcpu_pmu_restore_guest(vcpu);
 }
 
 static bool kvm_pmu_counter_is_enabled(struct kvm_pmc *pmc)
@@ -910,11 +891,11 @@ void kvm_vcpu_reload_pmu(struct kvm_vcpu *vcpu)
 {
 	u64 mask = kvm_pmu_implemented_counter_mask(vcpu);
 
-	kvm_pmu_handle_pmcr(vcpu, kvm_vcpu_read_pmcr(vcpu));
-
 	__vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= mask;
 	__vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= mask;
 	__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= mask;
+
+	kvm_pmu_reprogram_counter_mask(vcpu, mask);
 }
 
 int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)