RE: [XEN RFC PATCH 11/40] xen/x86: Move NUMA nodes and memory block ranges to common
Hi Julien,
> -----Original Message-----
> From: Julien Grall <julien@xxxxxxx>
> Sent: 25 August 2021 1:41
> To: Wei Chen <Wei.Chen@xxxxxxx>; xen-devel@xxxxxxxxxxxxxxxxxxxx;
> sstabellini@xxxxxxxxxx; jbeulich@xxxxxxxx
> Cc: Bertrand Marquis <Bertrand.Marquis@xxxxxxx>
> Subject: Re: [XEN RFC PATCH 11/40] xen/x86: Move NUMA nodes and memory
> block ranges to common
>
> Hi Wei,
>
> On 11/08/2021 11:23, Wei Chen wrote:
> > These data structures and functions are used to create the
> > mapping between node and memory blocks. In device tree based
> > NUMA, we will reuse these data structures and functions, so
> > we move this part of code from x86 to common.
> >
> > Signed-off-by: Wei Chen <wei.chen@xxxxxxx>
> > ---
> > xen/arch/x86/srat.c | 50 -------------------------------------
> > xen/common/numa.c | 51 ++++++++++++++++++++++++++++++++++++++
> > xen/include/asm-x86/numa.h | 8 ------
> > xen/include/xen/numa.h | 15 +++++++++++
> > 4 files changed, 66 insertions(+), 58 deletions(-)
> >
> > diff --git a/xen/arch/x86/srat.c b/xen/arch/x86/srat.c
> > index 6b77b98201..6d68b8a614 100644
> > --- a/xen/arch/x86/srat.c
> > +++ b/xen/arch/x86/srat.c
> > @@ -26,7 +26,6 @@ static struct acpi_table_slit *__read_mostly acpi_slit;
> >
> > static nodemask_t memory_nodes_parsed __initdata;
> > static nodemask_t processor_nodes_parsed __initdata;
> > -static struct node nodes[MAX_NUMNODES] __initdata;
> >
> > struct pxm2node {
> > unsigned pxm;
> > @@ -37,9 +36,6 @@ static struct pxm2node __read_mostly pxm2node[MAX_NUMNODES] =
> >
> > static unsigned node_to_pxm(nodeid_t n);
> >
> > -static int num_node_memblks;
> > -static struct node node_memblk_range[NR_NODE_MEMBLKS];
> > -static nodeid_t memblk_nodeid[NR_NODE_MEMBLKS];
> > static __initdata DECLARE_BITMAP(memblk_hotplug, NR_NODE_MEMBLKS);
> >
> > static inline bool node_found(unsigned idx, unsigned pxm)
> > @@ -104,52 +100,6 @@ nodeid_t setup_node(unsigned pxm)
> > return node;
> > }
> >
> > -int valid_numa_range(u64 start, u64 end, nodeid_t node)
> > -{
> > - int i;
> > -
> > - for (i = 0; i < num_node_memblks; i++) {
> > - struct node *nd = &node_memblk_range[i];
> > -
> > - if (nd->start <= start && nd->end >= end &&
> > - memblk_nodeid[i] == node)
> > - return 1;
> > - }
> > -
> > - return 0;
> > -}
> > -
> > -static __init int conflicting_memblks(u64 start, u64 end)
> > -{
> > - int i;
> > -
> > - for (i = 0; i < num_node_memblks; i++) {
> > - struct node *nd = &node_memblk_range[i];
> > - if (nd->start == nd->end)
> > - continue;
> > - if (nd->end > start && nd->start < end)
> > - return i;
> > - if (nd->end == end && nd->start == start)
> > - return i;
> > - }
> > - return -1;
> > -}
> > -
> > -static __init void cutoff_node(int i, u64 start, u64 end)
> > -{
> > - struct node *nd = &nodes[i];
> > - if (nd->start < start) {
> > - nd->start = start;
> > - if (nd->end < nd->start)
> > - nd->start = nd->end;
> > - }
> > - if (nd->end > end) {
> > - nd->end = end;
> > - if (nd->start > nd->end)
> > - nd->start = nd->end;
> > - }
> > -}
> > -
> > static __init void bad_srat(void)
> > {
> > int i;
> > diff --git a/xen/common/numa.c b/xen/common/numa.c
> > index 9b6f23dfc1..1facc8fe2b 100644
> > --- a/xen/common/numa.c
> > +++ b/xen/common/numa.c
> > @@ -29,6 +29,11 @@ nodeid_t cpu_to_node[NR_CPUS] __read_mostly = {
> >
> > cpumask_t node_to_cpumask[MAX_NUMNODES] __read_mostly;
> >
> > +struct node nodes[MAX_NUMNODES] __initdata;
> > +int num_node_memblks;
> > +struct node node_memblk_range[NR_NODE_MEMBLKS];
> > +nodeid_t memblk_nodeid[NR_NODE_MEMBLKS];
> > +
> > /*
> > * Given a shift value, try to populate memnodemap[]
> > * Returns :
> > @@ -136,6 +141,52 @@ int __init compute_hash_shift(struct node *nodes, int numnodes,
> > return shift;
> > }
> >
> > +int valid_numa_range(u64 start, u64 end, nodeid_t node)
> > +{
> > + int i;
> > +
> > + for (i = 0; i < num_node_memblks; i++) {
> > + struct node *nd = &node_memblk_range[i];
> > +
> > + if (nd->start <= start && nd->end >= end &&
> > + memblk_nodeid[i] == node)
> > + return 1;
> > + }
> > +
> > + return 0;
> > +}
> > +
> > +int __init conflicting_memblks(u64 start, u64 end)
> > +{
> > + int i;
> > +
> > + for (i = 0; i < num_node_memblks; i++) {
> > + struct node *nd = &node_memblk_range[i];
> > + if (nd->start == nd->end)
> > + continue;
> > + if (nd->end > start && nd->start < end)
> > + return i;
> > + if (nd->end == end && nd->start == start)
> > + return i;
> > + }
> > + return -1;
> > +}
> > +
> > +void __init cutoff_node(int i, u64 start, u64 end)
> > +{
> > + struct node *nd = &nodes[i];
> > + if (nd->start < start) {
> > + nd->start = start;
> > + if (nd->end < nd->start)
> > + nd->start = nd->end;
> > + }
> > + if (nd->end > end) {
> > + nd->end = end;
> > + if (nd->start > nd->end)
> > + nd->start = nd->end;
> > + }
> > +}
> > +
> > void numa_add_cpu(int cpu)
> > {
> > cpumask_set_cpu(cpu, &node_to_cpumask[cpu_to_node(cpu)]);
> > diff --git a/xen/include/asm-x86/numa.h b/xen/include/asm-x86/numa.h
> > index 07ff78ea1b..e8a92ad9df 100644
> > --- a/xen/include/asm-x86/numa.h
> > +++ b/xen/include/asm-x86/numa.h
> > @@ -17,12 +17,6 @@ extern cpumask_t node_to_cpumask[];
> > #define node_to_first_cpu(node) (__ffs(node_to_cpumask[node]))
> > #define node_to_cpumask(node) (node_to_cpumask[node])
> >
> > -struct node {
> > - u64 start,end;
> > -};
> > -
> > -extern int compute_hash_shift(struct node *nodes, int numnodes,
> > - nodeid_t *nodeids);
> > extern nodeid_t pxm_to_node(unsigned int pxm);
> >
> > #define ZONE_ALIGN (1UL << (MAX_ORDER+PAGE_SHIFT))
> > @@ -45,8 +39,6 @@ static inline void clear_node_cpumask(int cpu)
> > cpumask_clear_cpu(cpu, &node_to_cpumask[cpu_to_node(cpu)]);
> > }
> >
> > -extern int valid_numa_range(u64 start, u64 end, nodeid_t node);
> > -
> > void srat_parse_regions(u64 addr);
> > extern u8 __node_distance(nodeid_t a, nodeid_t b);
> > unsigned int arch_get_dma_bitsize(void);
> > diff --git a/xen/include/xen/numa.h b/xen/include/xen/numa.h
> > index 5af74b357f..67b79a73a3 100644
> > --- a/xen/include/xen/numa.h
> > +++ b/xen/include/xen/numa.h
> > @@ -54,6 +54,21 @@ static inline __attribute__((pure)) nodeid_t phys_to_nid(paddr_t addr)
> >
> > extern void numa_add_cpu(int cpu);
> >
> > +struct node {
> > + u64 start,end;
> > +};
> > +
> > +extern struct node nodes[MAX_NUMNODES];
> > +extern int num_node_memblks;
> > +extern struct node node_memblk_range[NR_NODE_MEMBLKS];
> > +extern nodeid_t memblk_nodeid[NR_NODE_MEMBLKS];
>
> I am not overly happy that the 4 variables above are now exported.
> Looking at the code, they are (only?) used in arch-specific code for
> acpi_numa_memory_affinity_init() and dtb_numa_memory_affinity_init().
>
> The bits touching the variables look quite similar between the two
> functions. The main differences seem to be the messages in printk() and
> the hotplug bits.
>
> So I think we should attempt to abstract the code. IIRC, we discussed
> some of the ways to abstract it when Vijay Kilari attempted to add
> NUMA support (see [1]). It might be worth having a look to see if you
> can re-use some of the ideas.
OK, I will look at that thread. If it's useful, I will do it in the
next version.
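
For discussion, here is a rough sketch of the shape such a common
helper could take. The name numa_update_node_memblk(), the -E* return
values and the assumption that memory_nodes_parsed also moves to common
code are illustrative only, not something this series does yet:

int __init numa_update_node_memblk(nodeid_t node, u64 start, u64 end)
{
	/* Hypothetical helper: name and error codes are placeholders. */
	struct node *nd = &nodes[node];
	int i;

	if (start >= end)
		return -EINVAL;

	if (num_node_memblks >= NR_NODE_MEMBLKS)
		return -ENOSPC;

	/* A new range may only overlap blocks already owned by this node. */
	i = conflicting_memblks(start, end);
	if (i >= 0 && memblk_nodeid[i] != node)
		return -EEXIST;

	/* First block for this node? Then it defines the node span. */
	if (!node_test_and_set(node, memory_nodes_parsed)) {
		nd->start = start;
		nd->end = end;
	} else {
		/* Otherwise grow the span to cover the new block. */
		if (start < nd->start)
			nd->start = start;
		if (nd->end < end)
			nd->end = end;
	}

	/* Record the block itself. */
	node_memblk_range[num_node_memblks].start = start;
	node_memblk_range[num_node_memblks].end = end;
	memblk_nodeid[num_node_memblks] = node;
	num_node_memblks++;

	return 0;
}

The arch-specific acpi_numa_memory_affinity_init() and
dtb_numa_memory_affinity_init() would then keep only their own printk()
messages and (for the ACPI side) the memblk_hotplug handling around a
call to this helper.
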
>
> > +
> > +extern int compute_hash_shift(struct node *nodes, int numnodes,
> > + nodeid_t *nodeids);
> > +extern int conflicting_memblks(u64 start, u64 end);
> > +extern void cutoff_node(int i, u64 start, u64 end);
> > +extern int valid_numa_range(u64 start, u64 end, nodeid_t node);
> > +
> > #endif /* CONFIG_NUMA */
> >
> > #endif /* _XEN_NUMA_H */
> >
>
> [1] https://lore.kernel.org/xen-devel/1500378106-2620-1-git-send-email-vijay.kilari@xxxxxxxxx/
>
> --
> Julien Grall