
Re: [RFC PATCH 1/2] xen/cpupool: Create different cpupools at boot time



On 17.11.21 10:57, Luca Fancellu wrote:
Introduce an architecture-specific way to create different cpupools
at boot time. This is particularly useful on Arm big.LITTLE systems,
where there might be the need to have different cpupools for each type
of core, but also on NUMA systems, which can have a different cpupool
for each node.

Signed-off-by: Luca Fancellu <luca.fancellu@xxxxxxx>
---
  xen/arch/arm/Kconfig       | 15 +++++++
  xen/arch/arm/Makefile      |  1 +
  xen/arch/arm/cpupool.c     | 84 ++++++++++++++++++++++++++++++++++++++
  xen/common/sched/cpupool.c |  5 ++-
  xen/include/xen/cpupool.h  | 30 ++++++++++++++
  5 files changed, 134 insertions(+), 1 deletion(-)
  create mode 100644 xen/arch/arm/cpupool.c
  create mode 100644 xen/include/xen/cpupool.h

diff --git a/xen/arch/arm/Kconfig b/xen/arch/arm/Kconfig
index ecfa6822e4..4d7cc9f3bc 100644
--- a/xen/arch/arm/Kconfig
+++ b/xen/arch/arm/Kconfig
@@ -33,6 +33,21 @@ config ACPI
          Advanced Configuration and Power Interface (ACPI) support for Xen is
          an alternative to device tree on ARM64.
+config BOOT_TIME_CPUPOOLS
+       bool "Create cpupools per cpu type at boot time."
+       depends on ARM
+       default n
+       help
+
+         Creates, during boot, a cpupool for each type of CPU found on
+         the system. This option is useful on systems with heterogeneous
+         core types.
+
+config MAX_BOOT_TIME_CPUPOOLS
+       depends on BOOT_TIME_CPUPOOLS
+       int "Maximum number of cpupools that can be created at boot time."
+       default 16
+
  config GICV3
        bool "GICv3 driver"
        depends on ARM_64 && !NEW_VGIC
diff --git a/xen/arch/arm/Makefile b/xen/arch/arm/Makefile
index 07f634508e..0bb8b84750 100644
--- a/xen/arch/arm/Makefile
+++ b/xen/arch/arm/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_HAS_ALTERNATIVE) += alternative.o
  obj-y += bootfdt.init.o
  obj-y += cpuerrata.o
  obj-y += cpufeature.o
+obj-$(CONFIG_BOOT_TIME_CPUPOOLS) += cpupool.o
  obj-y += decode.o
  obj-y += device.o
  obj-$(CONFIG_IOREQ_SERVER) += dm.o
diff --git a/xen/arch/arm/cpupool.c b/xen/arch/arm/cpupool.c
new file mode 100644
index 0000000000..550521e352
--- /dev/null
+++ b/xen/arch/arm/cpupool.c
@@ -0,0 +1,84 @@
+/******************************************************************************
+ * cpupool.c
+ *
+ * (C) 2021, arm
+ */
+
+#include <xen/cpumask.h>
+#include <xen/cpupool.h>
+#include <xen/err.h>
+#include <xen/sched.h>
+#include <asm/cpufeature.h>
+#include <asm/processor.h>
+#include "../../common/sched/private.h"

No, please don't do that.

You should instead add architecture-agnostic service functions to
common/sched/cpupool.c, removing the need to include that file here.

+
+typedef struct {
+    register_t     midr;
+    struct cpupool *pool;
+} pool_t;
+
+static pool_t __initdata pool_cpu_map[CONFIG_MAX_BOOT_TIME_CPUPOOLS];
+
+void __init arch_allocate_cpupools(const cpumask_t *cpu_online_map,
+                                   cpupool_create_t cpupool_create)

Drop the cpupool_create parameter here and ...

+{
+    unsigned int cpu, i;
+
+    /* First pool is the default pool0 associated with midr of the boot core */
+    pool_cpu_map[0].midr = system_cpuinfo.midr.bits;
+    pool_cpu_map[0].pool = cpupool0;
+
+    for_each_cpu ( cpu, cpu_online_map )
+    {
+        for ( i = 0; i < CONFIG_MAX_BOOT_TIME_CPUPOOLS; i++ )
+            if ( !pool_cpu_map[i].pool ||
+                 (cpu_data[cpu].midr.bits == pool_cpu_map[i].midr) )
+                break;
+
+        if ( i < CONFIG_MAX_BOOT_TIME_CPUPOOLS )
+        {
+            if ( !pool_cpu_map[i].pool )
+            {
+                /* There is no pool for this cpu midr, create it */
+                pool_cpu_map[i].midr = cpu_data[cpu].midr.bits;
+                debugtrace_printk("Create pool %u for cpu MIDR: 0x%"
+                                  PRIregister"\n", i, pool_cpu_map[i].midr);
+                pool_cpu_map[i].pool =
+                    cpupool_create(i, scheduler_get_default()->sched_id);
+                BUG_ON(IS_ERR(pool_cpu_map[i].pool));
+                cpupool_put(pool_cpu_map[i].pool);

... call a new wrapper in common/sched/cpupool.c taking just the id as
parameter (e.g. "struct cpupool *cpupool_create_default(unsigned int id)"),
which will use the default scheduler and do the cpupool_create() and
cpupool_put() calls internally.
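
A minimal sketch of what such a wrapper could look like in
common/sched/cpupool.c (the name cpupool_create_default() is just the
example from above, and I'm assuming cpupool_create(), cpupool_put() and
scheduler_get_default() keep their current signatures):

struct cpupool *__init cpupool_create_default(unsigned int pool_id)
{
    struct cpupool *pool;

    /* Use the default scheduler and hide the create/put handling. */
    pool = cpupool_create(pool_id, scheduler_get_default()->sched_id);
    BUG_ON(IS_ERR(pool));
    cpupool_put(pool);

    return pool;
}

The arch code above would then only need
pool_cpu_map[i].pool = cpupool_create_default(i); and the include of
sched/private.h can go away.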

+            }
+        }
+        else
+            panic("Could not create cpupool, maximum number reached (%u)",
+                  (unsigned int)(CONFIG_MAX_BOOT_TIME_CPUPOOLS));
+    }
+
+    /* Print useful information about the pools */
+    for ( i = 0; i < CONFIG_MAX_BOOT_TIME_CPUPOOLS; i++ )
+        if ( pool_cpu_map[i].pool )
+            printk(XENLOG_INFO "Pool-%u contains cpu with MIDR: 0x%"
+                   PRIregister"\n", i, pool_cpu_map[i].midr);
+}
+
+struct cpupool *__init arch_get_cpupool(unsigned int cpu)
+{
+    unsigned int i;
+
+    for ( i = 0; i < CONFIG_MAX_BOOT_TIME_CPUPOOLS; i++ )
+        if ( pool_cpu_map[i].pool &&
+             (cpu_data[cpu].midr.bits == pool_cpu_map[i].midr) )
+            return pool_cpu_map[i].pool;
+
+    return cpupool0;
+}

I don't think this return can be reached.

What about adding an "ASSERT_UNREACHABLE()" here and return NULL instead?
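
For illustration, the function could then end up looking like this
(a sketch only, reusing pool_cpu_map and cpu_data as defined in the
patch above):

struct cpupool *__init arch_get_cpupool(unsigned int cpu)
{
    unsigned int i;

    for ( i = 0; i < CONFIG_MAX_BOOT_TIME_CPUPOOLS; i++ )
        if ( pool_cpu_map[i].pool &&
             (cpu_data[cpu].midr.bits == pool_cpu_map[i].midr) )
            return pool_cpu_map[i].pool;

    /* Every online cpu got a pool in arch_allocate_cpupools(). */
    ASSERT_UNREACHABLE();
    return NULL;
}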

+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/common/sched/cpupool.c b/xen/common/sched/cpupool.c
index 8c6e6eb9cc..7389f04e28 100644
--- a/xen/common/sched/cpupool.c
+++ b/xen/common/sched/cpupool.c
@@ -13,6 +13,7 @@
  #include <xen/cpu.h>
  #include <xen/cpumask.h>
+#include <xen/cpupool.h>
  #include <xen/guest_access.h>
  #include <xen/hypfs.h>
  #include <xen/init.h>
@@ -1231,12 +1232,14 @@ static int __init cpupool_init(void)
      cpupool_put(cpupool0);
      register_cpu_notifier(&cpu_nfb);
+    arch_allocate_cpupools(&cpu_online_map, cpupool_create);
+
      spin_lock(&cpupool_lock);

      cpumask_copy(&cpupool_free_cpus, &cpu_online_map);

      for_each_cpu ( cpu, &cpupool_free_cpus )
-        cpupool_assign_cpu_locked(cpupool0, cpu);
+        cpupool_assign_cpu_locked(arch_get_cpupool(cpu), cpu);

      spin_unlock(&cpupool_lock);

diff --git a/xen/include/xen/cpupool.h b/xen/include/xen/cpupool.h
new file mode 100644
index 0000000000..4b50af9e3d
--- /dev/null
+++ b/xen/include/xen/cpupool.h
@@ -0,0 +1,30 @@
+#ifndef __XEN_CPUPOOL_H
+#define __XEN_CPUPOOL_H
+
+#include <xen/sched.h>
+
+typedef struct cpupool* (*cpupool_create_t)(unsigned int, unsigned int);
+
+#ifdef CONFIG_BOOT_TIME_CPUPOOLS
+
+void arch_allocate_cpupools(const cpumask_t *cpu_online_map,
+                            cpupool_create_t cpupool_create);
+
+struct cpupool *arch_get_cpupool(unsigned int cpu);
+
+#else
+
+static inline __init
+void arch_allocate_cpupools(const cpumask_t *cpu_online_map,
+                            cpupool_create_t cpupool_create)
+{
+}
+
+static inline struct cpupool *__init arch_get_cpupool(unsigned int cpu)
+{
+    return cpupool0;
+}
+
+#endif
+
+#endif /* __XEN_CPUPOOL_H */


I think this can all go into include/xen/sched.h.
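
A rough sketch of how the declarations could look in
include/xen/sched.h (already reflecting the dropped cpupool_create
parameter suggested above; the exact placement is just an example):

#ifdef CONFIG_BOOT_TIME_CPUPOOLS

void arch_allocate_cpupools(const cpumask_t *cpu_online_map);
struct cpupool *arch_get_cpupool(unsigned int cpu);

#else

static inline void arch_allocate_cpupools(const cpumask_t *cpu_online_map)
{
}

static inline struct cpupool *arch_get_cpupool(unsigned int cpu)
{
    return cpupool0;
}

#endif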


Juergen
