
[PATCH] cpufreq: finish conversion to altcall


  • To: "xen-devel@xxxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxxx>
  • From: Jan Beulich <jbeulich@xxxxxxxx>
  • Date: Mon, 15 Jan 2024 17:10:29 +0100
  • Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>, George Dunlap <george.dunlap@xxxxxxxxxx>, Julien Grall <julien@xxxxxxx>, Stefano Stabellini <sstabellini@xxxxxxxxxx>, Wei Liu <wl@xxxxxxx>, Roger Pau Monné <roger.pau@xxxxxxxxxx>
  • Delivery-date: Mon, 15 Jan 2024 16:10:37 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>

Even functions used only on infrequently executed paths want converting: this
way all pre-filled struct cpufreq_driver instances can become
__initconst_cf_clobber, thus allowing another 15 ENDBR instructions to be
eliminated during the 2nd phase of alternatives patching.
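
To illustrate the idiom (a minimal sketch, using the cpufreq_add_cpu() call
site adjusted below): an indirect call through a hook pointer is wrapped in
alternative_call(), which the 2nd phase of alternatives patching can rewrite
into a direct call; once no indirect references to the callee remain, its
ENDBR can be clobbered.

    /* Before: indirect call; the callee needs to keep its ENDBR, as it
     * remains indirectly reachable through the hook pointer. */
    ret = cpufreq_driver.init(policy);

    /* After: the call site gets patched into a direct call, so with all
     * references converted the callee's ENDBR can be overwritten. */
    ret = alternative_call(cpufreq_driver.init, policy);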

For acpi-cpufreq's optionally populated .get hook, make sure alternatives
patching can actually see the pointer. See also the code comment.
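
As a sketch of the resulting pattern (simplified from the changes below): the
hook is populated in the pre-filled, __initconst_cf_clobber instance, so the
patcher sees a non-NULL pointer, while the runtime copy is zapped again from
an initcall until acpi_cpufreq_cpu_init() establishes whether it's wanted:

    /* Statically populated, hence visible at alternatives-patching time: */
    .get    = get_cur_freq_on_cpu,

    /* Zapped again at runtime; call sites' NULL checks consult the runtime
     * pointer, while the patched (direct) call itself is unaffected: */
    cpufreq_driver.get = NULL;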

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
---
Instead of adjusting the call site, we could also purge the .setpolicy
hook: none of the three drivers actually implements it.

--- a/xen/arch/x86/acpi/cpufreq/cpufreq.c
+++ b/xen/arch/x86/acpi/cpufreq/cpufreq.c
@@ -625,12 +625,14 @@ static int cf_check acpi_cpufreq_cpu_exi
     return 0;
 }
 
-static const struct cpufreq_driver __initconstrel acpi_cpufreq_driver = {
+static const struct cpufreq_driver __initconst_cf_clobber
+acpi_cpufreq_driver = {
     .name   = "acpi-cpufreq",
     .verify = acpi_cpufreq_verify,
     .target = acpi_cpufreq_target,
     .init   = acpi_cpufreq_cpu_init,
     .exit   = acpi_cpufreq_cpu_exit,
+    .get    = get_cur_freq_on_cpu,
 };
 
 static int __init cf_check cpufreq_driver_init(void)
@@ -675,6 +677,19 @@ static int __init cf_check cpufreq_drive
 }
 presmp_initcall(cpufreq_driver_init);
 
+static int __init cf_check cpufreq_driver_late_init(void)
+{
+    /*
+     * While acpi_cpufreq_driver wants to unconditionally have all hooks
+     * populated for __initconst_cf_clobber to have as much of an effect as
+     * possible, zap the .get hook here (but not in cpufreq_driver_init()),
+     * until acpi_cpufreq_cpu_init() knows whether it's wanted / needed.
+     */
+    cpufreq_driver.get = NULL;
+    return 0;
+}
+__initcall(cpufreq_driver_late_init);
+
 int cpufreq_cpu_init(unsigned int cpuid)
 {
     int ret;
--- a/xen/arch/x86/acpi/cpufreq/hwp.c
+++ b/xen/arch/x86/acpi/cpufreq/hwp.c
@@ -513,8 +513,8 @@ static int cf_check hwp_cpufreq_update(i
     return per_cpu(hwp_drv_data, cpuid)->ret;
 }
 
-static const struct cpufreq_driver __initconstrel hwp_cpufreq_driver =
-{
+static const struct cpufreq_driver __initconst_cf_clobber
+hwp_cpufreq_driver = {
     .name   = XEN_HWP_DRIVER_NAME,
     .verify = hwp_cpufreq_verify,
     .target = hwp_cpufreq_target,
--- a/xen/arch/x86/acpi/cpufreq/powernow.c
+++ b/xen/arch/x86/acpi/cpufreq/powernow.c
@@ -317,7 +317,8 @@ static int cf_check powernow_cpufreq_cpu
     return 0;
 }
 
-static const struct cpufreq_driver __initconstrel powernow_cpufreq_driver = {
+static const struct cpufreq_driver __initconst_cf_clobber
+powernow_cpufreq_driver = {
     .name   = "powernow",
     .verify = powernow_cpufreq_verify,
     .target = powernow_cpufreq_target,
--- a/xen/drivers/acpi/pmstat.c
+++ b/xen/drivers/acpi/pmstat.c
@@ -240,7 +240,8 @@ static int get_cpufreq_para(struct xen_s
         return ret;
 
     op->u.get_para.cpuinfo_cur_freq =
-        cpufreq_driver.get ? cpufreq_driver.get(op->cpuid) : policy->cur;
+        cpufreq_driver.get ? alternative_call(cpufreq_driver.get, op->cpuid)
+                           : policy->cur;
     op->u.get_para.cpuinfo_max_freq = policy->cpuinfo.max_freq;
     op->u.get_para.cpuinfo_min_freq = policy->cpuinfo.min_freq;
     op->u.get_para.turbo_enabled = cpufreq_get_turbo_status(op->cpuid);
--- a/xen/drivers/cpufreq/cpufreq.c
+++ b/xen/drivers/cpufreq/cpufreq.c
@@ -278,7 +278,7 @@ int cpufreq_add_cpu(unsigned int cpu)
         policy->cpu = cpu;
         per_cpu(cpufreq_cpu_policy, cpu) = policy;
 
-        ret = cpufreq_driver.init(policy);
+        ret = alternative_call(cpufreq_driver.init, policy);
         if (ret) {
             free_cpumask_var(policy->cpus);
             xfree(policy);
@@ -337,7 +337,7 @@ err1:
     cpumask_clear_cpu(cpu, cpufreq_dom->map);
 
     if (cpumask_empty(policy->cpus)) {
-        cpufreq_driver.exit(policy);
+        alternative_call(cpufreq_driver.exit, policy);
         free_cpumask_var(policy->cpus);
         xfree(policy);
     }
@@ -401,7 +401,7 @@ int cpufreq_del_cpu(unsigned int cpu)
     cpumask_clear_cpu(cpu, cpufreq_dom->map);
 
     if (cpumask_empty(policy->cpus)) {
-        cpufreq_driver.exit(policy);
+        alternative_call(cpufreq_driver.exit, policy);
         free_cpumask_var(policy->cpus);
         xfree(policy);
     }
--- a/xen/drivers/cpufreq/utility.c
+++ b/xen/drivers/cpufreq/utility.c
@@ -413,7 +413,7 @@ int cpufreq_update_turbo(int cpuid, int
     policy->turbo = new_state;
     if (cpufreq_driver.update)
     {
-        ret = cpufreq_driver.update(cpuid, policy);
+        ret = alternative_call(cpufreq_driver.update, cpuid, policy);
         if (ret)
             policy->turbo = curr_state;
     }
@@ -449,7 +449,7 @@ int __cpufreq_set_policy(struct cpufreq_
         return -EINVAL;
 
     /* verify the cpu speed can be set within this limit */
-    ret = cpufreq_driver.verify(policy);
+    ret = alternative_call(cpufreq_driver.verify, policy);
     if (ret)
         return ret;
 
@@ -457,7 +457,7 @@ int __cpufreq_set_policy(struct cpufreq_
     data->max = policy->max;
     data->limits = policy->limits;
     if (cpufreq_driver.setpolicy)
-        return cpufreq_driver.setpolicy(data);
+        return alternative_call(cpufreq_driver.setpolicy, data);
 
     if (policy->governor != data->governor) {
         /* save old, working values */
