|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [xen staging] xen/pmstat: consolidate code into pmstat.c
commit bf0cd071db2a6cdd61e612cf44accad4e0e495e4
Author: Penny Zheng <Penny.Zheng@xxxxxxx>
AuthorDate: Wed Jun 11 11:07:33 2025 +0200
Commit: Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Wed Jun 11 11:07:33 2025 +0200
xen/pmstat: consolidate code into pmstat.c
We move the following functions into drivers/acpi/pmstat.c, as they
are all designed for performance statistics:
- cpufreq_residency_update()
- cpufreq_statistic_reset()
- cpufreq_statistic_update()
- cpufreq_statistic_init()
- cpufreq_statistic_exit()
Consequently, variable "cpufreq_statistic_data" and "cpufreq_statistic_lock"
shall become static.
We also move out acpi_set_pdc_bits(), as it is the handler for sub-hypercall
XEN_PM_PDC, and shall stay with the other handlers together in
drivers/cpufreq/cpufreq.c.
Various style corrections shall be applied at the same time while moving
these functions, including:
- braces for if() and for() shall live on a separate line
- add extra space before and after bracket of if() and for()
- use array notation
- convert uint32_t into unsigned int
- convert u32 into uint32_t
Signed-off-by: Penny Zheng <Penny.Zheng@xxxxxxx>
Reviewed-by: Stefano Stabellini <sstabellini@xxxxxxxxxx>
Acked-by: Jan Beulich <jbeulich@xxxxxxxx>
---
xen/drivers/acpi/pmstat.c | 202 +++++++++++++++++++++++++-----
xen/drivers/cpufreq/cpufreq.c | 31 +++++
xen/drivers/cpufreq/utility.c | 163 ------------------------
xen/include/acpi/cpufreq/cpufreq.h | 2 -
xen/include/acpi/cpufreq/processor_perf.h | 4 -
5 files changed, 201 insertions(+), 201 deletions(-)
diff --git a/xen/drivers/acpi/pmstat.c b/xen/drivers/acpi/pmstat.c
index 0d570e28bf..521db7e98e 100644
--- a/xen/drivers/acpi/pmstat.c
+++ b/xen/drivers/acpi/pmstat.c
@@ -41,7 +41,176 @@
#include <acpi/cpufreq/cpufreq.h>
#include <xen/pmstat.h>
-DEFINE_PER_CPU_READ_MOSTLY(struct pm_px *, cpufreq_statistic_data);
+static DEFINE_PER_CPU_READ_MOSTLY(struct pm_px *, cpufreq_statistic_data);
+
+static DEFINE_PER_CPU(spinlock_t, cpufreq_statistic_lock);
+
+/*********************************************************************
+ * Px STATISTIC INFO *
+ *********************************************************************/
+
+static void cpufreq_residency_update(unsigned int cpu, uint8_t state)
+{
+ uint64_t now, total_idle_ns;
+ int64_t delta;
+ struct pm_px *pxpt = per_cpu(cpufreq_statistic_data, cpu);
+
+ total_idle_ns = get_cpu_idle_time(cpu);
+ now = NOW();
+
+ delta = (now - pxpt->prev_state_wall) -
+ (total_idle_ns - pxpt->prev_idle_wall);
+
+ if ( likely(delta >= 0) )
+ pxpt->u.pt[state].residency += delta;
+
+ pxpt->prev_state_wall = now;
+ pxpt->prev_idle_wall = total_idle_ns;
+}
+
+void cpufreq_statistic_update(unsigned int cpu, uint8_t from, uint8_t to)
+{
+ struct pm_px *pxpt;
+ const struct processor_pminfo *pmpt = processor_pminfo[cpu];
+ spinlock_t *cpufreq_statistic_lock =
+ &per_cpu(cpufreq_statistic_lock, cpu);
+
+ spin_lock(cpufreq_statistic_lock);
+
+ pxpt = per_cpu(cpufreq_statistic_data, cpu);
+ if ( !pxpt || !pmpt )
+ {
+ spin_unlock(cpufreq_statistic_lock);
+ return;
+ }
+
+ pxpt->u.last = from;
+ pxpt->u.cur = to;
+ pxpt->u.pt[to].count++;
+
+ cpufreq_residency_update(cpu, from);
+
+ pxpt->u.trans_pt[from * pmpt->perf.state_count + to]++;
+
+ spin_unlock(cpufreq_statistic_lock);
+}
+
+int cpufreq_statistic_init(unsigned int cpu)
+{
+ unsigned int i, count;
+ struct pm_px *pxpt;
+ const struct processor_pminfo *pmpt = processor_pminfo[cpu];
+ spinlock_t *cpufreq_statistic_lock = &per_cpu(cpufreq_statistic_lock, cpu);
+
+ spin_lock_init(cpufreq_statistic_lock);
+
+ if ( !pmpt )
+ return -EINVAL;
+
+ spin_lock(cpufreq_statistic_lock);
+
+ pxpt = per_cpu(cpufreq_statistic_data, cpu);
+ if ( pxpt )
+ {
+ spin_unlock(cpufreq_statistic_lock);
+ return 0;
+ }
+
+ count = pmpt->perf.state_count;
+
+ pxpt = xzalloc(struct pm_px);
+ if ( !pxpt )
+ {
+ spin_unlock(cpufreq_statistic_lock);
+ return -ENOMEM;
+ }
+ per_cpu(cpufreq_statistic_data, cpu) = pxpt;
+
+ pxpt->u.trans_pt = xzalloc_array(uint64_t, count * count);
+ if ( !pxpt->u.trans_pt )
+ {
+ xfree(pxpt);
+ spin_unlock(cpufreq_statistic_lock);
+ return -ENOMEM;
+ }
+
+ pxpt->u.pt = xzalloc_array(struct pm_px_val, count);
+ if ( !pxpt->u.pt )
+ {
+ xfree(pxpt->u.trans_pt);
+ xfree(pxpt);
+ spin_unlock(cpufreq_statistic_lock);
+ return -ENOMEM;
+ }
+
+ pxpt->u.total = pmpt->perf.state_count;
+ pxpt->u.usable = pmpt->perf.state_count - pmpt->perf.platform_limit;
+
+ for ( i = 0; i < pmpt->perf.state_count; i++ )
+ pxpt->u.pt[i].freq = pmpt->perf.states[i].core_frequency;
+
+ pxpt->prev_state_wall = NOW();
+ pxpt->prev_idle_wall = get_cpu_idle_time(cpu);
+
+ spin_unlock(cpufreq_statistic_lock);
+
+ return 0;
+}
+
+void cpufreq_statistic_exit(unsigned int cpu)
+{
+ struct pm_px *pxpt;
+ spinlock_t *cpufreq_statistic_lock = &per_cpu(cpufreq_statistic_lock, cpu);
+
+ spin_lock(cpufreq_statistic_lock);
+
+ pxpt = per_cpu(cpufreq_statistic_data, cpu);
+ if ( !pxpt )
+ {
+ spin_unlock(cpufreq_statistic_lock);
+ return;
+ }
+
+ xfree(pxpt->u.trans_pt);
+ xfree(pxpt->u.pt);
+ xfree(pxpt);
+ per_cpu(cpufreq_statistic_data, cpu) = NULL;
+
+ spin_unlock(cpufreq_statistic_lock);
+}
+
+static void cpufreq_statistic_reset(unsigned int cpu)
+{
+ unsigned int i, j, count;
+ struct pm_px *pxpt;
+ const struct processor_pminfo *pmpt = processor_pminfo[cpu];
+ spinlock_t *cpufreq_statistic_lock = &per_cpu(cpufreq_statistic_lock, cpu);
+
+ spin_lock(cpufreq_statistic_lock);
+
+ pxpt = per_cpu(cpufreq_statistic_data, cpu);
+ if ( !pmpt || !pxpt || !pxpt->u.pt || !pxpt->u.trans_pt )
+ {
+ spin_unlock(cpufreq_statistic_lock);
+ return;
+ }
+
+ count = pmpt->perf.state_count;
+
+ for ( i = 0; i < count; i++ )
+ {
+ pxpt->u.pt[i].residency = 0;
+ pxpt->u.pt[i].count = 0;
+
+ for ( j = 0; j < count; j++ )
+ pxpt->u.trans_pt[i * count + j] = 0;
+ }
+
+ pxpt->prev_state_wall = NOW();
+ pxpt->prev_idle_wall = get_cpu_idle_time(cpu);
+
+ spin_unlock(cpufreq_statistic_lock);
+}
/*
* Get PM statistic info
@@ -521,34 +690,3 @@ int do_pm_op(struct xen_sysctl_pm_op *op)
return ret;
}
-
-int acpi_set_pdc_bits(uint32_t acpi_id, XEN_GUEST_HANDLE(uint32) pdc)
-{
- u32 bits[3];
- int ret;
-
- if ( copy_from_guest(bits, pdc, 2) )
- ret = -EFAULT;
- else if ( bits[0] != ACPI_PDC_REVISION_ID || !bits[1] )
- ret = -EINVAL;
- else if ( copy_from_guest_offset(bits + 2, pdc, 2, 1) )
- ret = -EFAULT;
- else
- {
- u32 mask = 0;
-
- if ( xen_processor_pmbits & XEN_PROCESSOR_PM_CX )
- mask |= ACPI_PDC_C_MASK | ACPI_PDC_SMP_C1PT;
- if ( xen_processor_pmbits & XEN_PROCESSOR_PM_PX )
- mask |= ACPI_PDC_P_MASK | ACPI_PDC_SMP_C1PT;
- if ( xen_processor_pmbits & XEN_PROCESSOR_PM_TX )
- mask |= ACPI_PDC_T_MASK | ACPI_PDC_SMP_C1PT;
- bits[2] &= (ACPI_PDC_C_MASK | ACPI_PDC_P_MASK | ACPI_PDC_T_MASK |
- ACPI_PDC_SMP_C1PT) & ~mask;
- ret = arch_acpi_set_pdc_bits(acpi_id, bits, mask);
- }
- if ( !ret && __copy_to_guest_offset(pdc, 2, bits + 2, 1) )
- ret = -EFAULT;
-
- return ret;
-}
diff --git a/xen/drivers/cpufreq/cpufreq.c b/xen/drivers/cpufreq/cpufreq.c
index 635f6e8c61..f47aad3f75 100644
--- a/xen/drivers/cpufreq/cpufreq.c
+++ b/xen/drivers/cpufreq/cpufreq.c
@@ -589,6 +589,37 @@ out:
return ret;
}
+int acpi_set_pdc_bits(unsigned int acpi_id, XEN_GUEST_HANDLE(uint32) pdc)
+{
+ uint32_t bits[3];
+ int ret;
+
+ if ( copy_from_guest(bits, pdc, 2) )
+ ret = -EFAULT;
+ else if ( bits[0] != ACPI_PDC_REVISION_ID || !bits[1] )
+ ret = -EINVAL;
+ else if ( copy_from_guest_offset(bits + 2, pdc, 2, 1) )
+ ret = -EFAULT;
+ else
+ {
+ uint32_t mask = 0;
+
+ if ( xen_processor_pmbits & XEN_PROCESSOR_PM_CX )
+ mask |= ACPI_PDC_C_MASK | ACPI_PDC_SMP_C1PT;
+ if ( xen_processor_pmbits & XEN_PROCESSOR_PM_PX )
+ mask |= ACPI_PDC_P_MASK | ACPI_PDC_SMP_C1PT;
+ if ( xen_processor_pmbits & XEN_PROCESSOR_PM_TX )
+ mask |= ACPI_PDC_T_MASK | ACPI_PDC_SMP_C1PT;
+ bits[2] &= (ACPI_PDC_C_MASK | ACPI_PDC_P_MASK | ACPI_PDC_T_MASK |
+ ACPI_PDC_SMP_C1PT) & ~mask;
+ ret = arch_acpi_set_pdc_bits(acpi_id, bits, mask);
+ }
+ if ( !ret && __copy_to_guest_offset(pdc, 2, bits + 2, 1) )
+ ret = -EFAULT;
+
+ return ret;
+}
+
static void cpufreq_cmdline_common_para(struct cpufreq_policy *new_policy)
{
if (usr_max_freq)
diff --git a/xen/drivers/cpufreq/utility.c b/xen/drivers/cpufreq/utility.c
index 309c0682cf..723045b240 100644
--- a/xen/drivers/cpufreq/utility.c
+++ b/xen/drivers/cpufreq/utility.c
@@ -35,169 +35,6 @@ struct cpufreq_driver __read_mostly cpufreq_driver;
struct processor_pminfo *__read_mostly processor_pminfo[NR_CPUS];
DEFINE_PER_CPU_READ_MOSTLY(struct cpufreq_policy *, cpufreq_cpu_policy);
-DEFINE_PER_CPU(spinlock_t, cpufreq_statistic_lock);
-
-/*********************************************************************
- * Px STATISTIC INFO *
- *********************************************************************/
-
-void cpufreq_residency_update(unsigned int cpu, uint8_t state)
-{
- uint64_t now, total_idle_ns;
- int64_t delta;
- struct pm_px *pxpt = per_cpu(cpufreq_statistic_data, cpu);
-
- total_idle_ns = get_cpu_idle_time(cpu);
- now = NOW();
-
- delta = (now - pxpt->prev_state_wall) -
- (total_idle_ns - pxpt->prev_idle_wall);
-
- if ( likely(delta >= 0) )
- pxpt->u.pt[state].residency += delta;
-
- pxpt->prev_state_wall = now;
- pxpt->prev_idle_wall = total_idle_ns;
-}
-
-void cpufreq_statistic_update(unsigned int cpu, uint8_t from, uint8_t to)
-{
- struct pm_px *pxpt;
- struct processor_pminfo *pmpt = processor_pminfo[cpu];
- spinlock_t *cpufreq_statistic_lock =
- &per_cpu(cpufreq_statistic_lock, cpu);
-
- spin_lock(cpufreq_statistic_lock);
-
- pxpt = per_cpu(cpufreq_statistic_data, cpu);
- if ( !pxpt || !pmpt ) {
- spin_unlock(cpufreq_statistic_lock);
- return;
- }
-
- pxpt->u.last = from;
- pxpt->u.cur = to;
- pxpt->u.pt[to].count++;
-
- cpufreq_residency_update(cpu, from);
-
- (*(pxpt->u.trans_pt + from * pmpt->perf.state_count + to))++;
-
- spin_unlock(cpufreq_statistic_lock);
-}
-
-int cpufreq_statistic_init(unsigned int cpu)
-{
- uint32_t i, count;
- struct pm_px *pxpt;
- const struct processor_pminfo *pmpt = processor_pminfo[cpu];
- spinlock_t *cpufreq_statistic_lock = &per_cpu(cpufreq_statistic_lock, cpu);
-
- spin_lock_init(cpufreq_statistic_lock);
-
- if ( !pmpt )
- return -EINVAL;
-
- spin_lock(cpufreq_statistic_lock);
-
- pxpt = per_cpu(cpufreq_statistic_data, cpu);
- if ( pxpt ) {
- spin_unlock(cpufreq_statistic_lock);
- return 0;
- }
-
- count = pmpt->perf.state_count;
-
- pxpt = xzalloc(struct pm_px);
- if ( !pxpt ) {
- spin_unlock(cpufreq_statistic_lock);
- return -ENOMEM;
- }
-
- pxpt->u.trans_pt = xzalloc_array(uint64_t, count * count);
- if (!pxpt->u.trans_pt) {
- xfree(pxpt);
- spin_unlock(cpufreq_statistic_lock);
- return -ENOMEM;
- }
-
- pxpt->u.pt = xzalloc_array(struct pm_px_val, count);
- if (!pxpt->u.pt) {
- xfree(pxpt->u.trans_pt);
- xfree(pxpt);
- spin_unlock(cpufreq_statistic_lock);
- return -ENOMEM;
- }
-
- pxpt->u.total = count;
- pxpt->u.usable = count - pmpt->perf.platform_limit;
-
- for ( i = 0; i < count; i++ )
- pxpt->u.pt[i].freq = pmpt->perf.states[i].core_frequency;
-
- pxpt->prev_state_wall = NOW();
- pxpt->prev_idle_wall = get_cpu_idle_time(cpu);
-
- per_cpu(cpufreq_statistic_data, cpu) = pxpt;
-
- spin_unlock(cpufreq_statistic_lock);
-
- return 0;
-}
-
-void cpufreq_statistic_exit(unsigned int cpu)
-{
- struct pm_px *pxpt;
- spinlock_t *cpufreq_statistic_lock = &per_cpu(cpufreq_statistic_lock, cpu);
-
- spin_lock(cpufreq_statistic_lock);
-
- pxpt = per_cpu(cpufreq_statistic_data, cpu);
- if (!pxpt) {
- spin_unlock(cpufreq_statistic_lock);
- return;
- }
-
- xfree(pxpt->u.trans_pt);
- xfree(pxpt->u.pt);
- xfree(pxpt);
- per_cpu(cpufreq_statistic_data, cpu) = NULL;
-
- spin_unlock(cpufreq_statistic_lock);
-}
-
-void cpufreq_statistic_reset(unsigned int cpu)
-{
- uint32_t i, j, count;
- struct pm_px *pxpt;
- const struct processor_pminfo *pmpt = processor_pminfo[cpu];
- spinlock_t *cpufreq_statistic_lock = &per_cpu(cpufreq_statistic_lock, cpu);
-
- spin_lock(cpufreq_statistic_lock);
-
- pxpt = per_cpu(cpufreq_statistic_data, cpu);
- if ( !pmpt || !pxpt || !pxpt->u.pt || !pxpt->u.trans_pt ) {
- spin_unlock(cpufreq_statistic_lock);
- return;
- }
-
- count = pmpt->perf.state_count;
-
- for (i=0; i < count; i++) {
- pxpt->u.pt[i].residency = 0;
- pxpt->u.pt[i].count = 0;
-
- for (j=0; j < count; j++)
- *(pxpt->u.trans_pt + i*count + j) = 0;
- }
-
- pxpt->prev_state_wall = NOW();
- pxpt->prev_idle_wall = get_cpu_idle_time(cpu);
-
- spin_unlock(cpufreq_statistic_lock);
-}
-
-
/*********************************************************************
* FREQUENCY TABLE HELPERS *
*********************************************************************/
diff --git a/xen/include/acpi/cpufreq/cpufreq.h
b/xen/include/acpi/cpufreq/cpufreq.h
index a3c84143af..241117a9af 100644
--- a/xen/include/acpi/cpufreq/cpufreq.h
+++ b/xen/include/acpi/cpufreq/cpufreq.h
@@ -20,8 +20,6 @@
#include "processor_perf.h"
-DECLARE_PER_CPU(spinlock_t, cpufreq_statistic_lock);
-
extern bool cpufreq_verbose;
enum cpufreq_xen_opt {
diff --git a/xen/include/acpi/cpufreq/processor_perf.h
b/xen/include/acpi/cpufreq/processor_perf.h
index 301104e16f..6de43f8602 100644
--- a/xen/include/acpi/cpufreq/processor_perf.h
+++ b/xen/include/acpi/cpufreq/processor_perf.h
@@ -9,11 +9,9 @@
unsigned int powernow_register_driver(void);
unsigned int get_measured_perf(unsigned int cpu, unsigned int flag);
-void cpufreq_residency_update(unsigned int cpu, uint8_t state);
void cpufreq_statistic_update(unsigned int cpu, uint8_t from, uint8_t to);
int cpufreq_statistic_init(unsigned int cpu);
void cpufreq_statistic_exit(unsigned int cpu);
-void cpufreq_statistic_reset(unsigned int cpu);
int cpufreq_limit_change(unsigned int cpu);
@@ -56,7 +54,5 @@ struct pm_px {
uint64_t prev_idle_wall;
};
-DECLARE_PER_CPU(struct pm_px *, cpufreq_statistic_data);
-
int cpufreq_cpu_init(unsigned int cpu);
#endif /* __XEN_PROCESSOR_PM_H__ */
--
generated by git-patchbot for /home/xen/git/xen.git#staging
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |