[Xen-devel] [RFC PATCH v3 03/24] x86: NUMA: Fix datatypes and attributes
From: Vijaya Kumar K <Vijaya.Kumar@xxxxxxxxxx>

Change u{8,32,64} to uint{8,32,64}_t and u64 to paddr_t wherever
applicable. Fix the placement of attributes (__init, __initdata,
__read_mostly) to match the coding style.

Also changed:
 - some variables from int to unsigned int
 - use pfn_to_paddr()/paddr_to_pfn() wherever required
 - allocate memnodemap[] with BITS_PER_LONG entries

Signed-off-by: Vijaya Kumar K <Vijaya.Kumar@xxxxxxxxxx>
---
v3:
 - Change unsigned to unsigned int
 - Update commit message
 - Drop the change of memnode_shift to unsigned int
 - Use pfn_to_paddr()/paddr_to_pfn()
 - Allocate memnodemap[] of size BITS_PER_LONG
---
 xen/arch/x86/numa.c        | 54 +++++++++++++++++++++-----------------
 xen/arch/x86/srat.c        | 64 +++++++++++++++++++++++-----------------------
 xen/include/asm-arm/numa.h |  2 +-
 xen/include/asm-x86/acpi.h |  2 +-
 xen/include/asm-x86/numa.h | 16 ++++++------
 5 files changed, 72 insertions(+), 66 deletions(-)

diff --git a/xen/arch/x86/numa.c b/xen/arch/x86/numa.c
index 444d7ad..aa4a7c1 100644
--- a/xen/arch/x86/numa.c
+++ b/xen/arch/x86/numa.c
@@ -25,11 +25,17 @@ struct node_data node_data[MAX_NUMNODES];
 
 /* Mapping from pdx to node id */
 int memnode_shift;
-static typeof(*memnodemap) _memnodemap[64];
+
+/*
+ * In case of numa init failure or numa off,
+ * memnode_shift is initialized to BITS_PER_LONG - 1. Hence allocate
+ * memnodemap[] of BITS_PER_LONG.
+ */
+static typeof(*memnodemap) _memnodemap[BITS_PER_LONG];
 unsigned long memnodemapsize;
-u8 *memnodemap;
+uint8_t *memnodemap;
 
-nodeid_t cpu_to_node[NR_CPUS] __read_mostly = {
+nodeid_t __read_mostly cpu_to_node[NR_CPUS] = {
     [0 ... NR_CPUS-1] = NUMA_NO_NODE
 };
 /*
@@ -38,7 +44,7 @@ nodeid_t cpu_to_node[NR_CPUS] __read_mostly = {
 nodeid_t apicid_to_node[MAX_LOCAL_APIC] = {
     [0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
 };
-cpumask_t node_to_cpumask[MAX_NUMNODES] __read_mostly;
+cpumask_t __read_mostly node_to_cpumask[MAX_NUMNODES];
 
 nodemask_t __read_mostly node_online_map = { { [0] = 1UL } };
 
@@ -166,12 +172,12 @@ int __init compute_hash_shift(struct node *nodes, int numnodes,
     return shift;
 }
 
 /* initialize NODE_DATA given nodeid and start/end */
-void __init setup_node_bootmem(nodeid_t nodeid, u64 start, u64 end)
+void __init setup_node_bootmem(nodeid_t nodeid, paddr_t start, paddr_t end)
 {
     unsigned long start_pfn, end_pfn;
 
-    start_pfn = start >> PAGE_SHIFT;
-    end_pfn = end >> PAGE_SHIFT;
+    start_pfn = paddr_to_pfn(start);
+    end_pfn = paddr_to_pfn(end);
 
     NODE_DATA(nodeid)->node_start_pfn = start_pfn;
     NODE_DATA(nodeid)->node_spanned_pages = end_pfn - start_pfn;
@@ -201,19 +207,20 @@ void __init numa_init_array(void)
 }
 
 #ifdef CONFIG_NUMA_EMU
-static int numa_fake __initdata = 0;
+static unsigned int __initdata numa_fake;
 
 /* Numa emulation */
-static int __init numa_emulation(u64 start_pfn, u64 end_pfn)
+static int __init numa_emulation(uint64_t start_pfn, uint64_t end_pfn)
 {
-    int i;
+    unsigned int i;
     struct node nodes[MAX_NUMNODES];
-    u64 sz = ((end_pfn - start_pfn) << PAGE_SHIFT) / numa_fake;
+    uint64_t sz = ((end_pfn - start_pfn) << PAGE_SHIFT) / numa_fake;
 
     /* Kludge needed for the hash function */
     if ( hweight64(sz) > 1 )
     {
-        u64 x = 1;
+        uint64_t x = 1;
+
         while ( (x << 1) < sz )
             x <<= 1;
         if ( x < sz / 2 )
@@ -225,9 +232,9 @@ static int __init numa_emulation(u64 start_pfn, u64 end_pfn)
     memset(&nodes,0,sizeof(nodes));
     for ( i = 0; i < numa_fake; i++ )
     {
-        nodes[i].start = (start_pfn << PAGE_SHIFT) + i * sz;
+        nodes[i].start = pfn_to_paddr(start_pfn) + i * sz;
         if ( i == numa_fake - 1 )
-            sz = (end_pfn << PAGE_SHIFT) - nodes[i].start;
+            sz = pfn_to_paddr(end_pfn) - nodes[i].start;
         nodes[i].end = nodes[i].start + sz;
printk(KERN_INFO "Faking node %d at %"PRIx64"-%"PRIx64" (%"PRIu64"MB)\n", @@ -260,8 +267,8 @@ void __init numa_initmem_init(unsigned long start_pfn, unsigned long end_pfn) #endif #ifdef CONFIG_ACPI_NUMA - if ( !numa_off && !acpi_scan_nodes((u64)start_pfn << PAGE_SHIFT, - (u64)end_pfn << PAGE_SHIFT) ) + if ( !numa_off && + !acpi_scan_nodes(pfn_to_paddr(start_pfn), pfn_to_paddr(end_pfn)) ) return; #endif @@ -269,8 +276,7 @@ void __init numa_initmem_init(unsigned long start_pfn, unsigned long end_pfn) numa_off ? "NUMA turned off" : "No NUMA configuration found"); printk(KERN_INFO "Faking a node at %016"PRIx64"-%016"PRIx64"\n", - (u64)start_pfn << PAGE_SHIFT, - (u64)end_pfn << PAGE_SHIFT); + pfn_to_paddr(start_pfn), pfn_to_paddr(end_pfn)); /* setup dummy node covering all memory */ memnode_shift = BITS_PER_LONG - 1; memnodemap = _memnodemap; @@ -279,8 +285,7 @@ void __init numa_initmem_init(unsigned long start_pfn, unsigned long end_pfn) for ( i = 0; i < nr_cpu_ids; i++ ) numa_set_node(i, 0); cpumask_copy(&node_to_cpumask[0], cpumask_of(0)); - setup_node_bootmem(0, (u64)start_pfn << PAGE_SHIFT, - (u64)end_pfn << PAGE_SHIFT); + setup_node_bootmem(0, pfn_to_paddr(start_pfn), pfn_to_paddr(end_pfn)); } void numa_add_cpu(int cpu) @@ -294,7 +299,7 @@ void numa_set_node(int cpu, nodeid_t node) } /* [numa=off] */ -static __init int numa_setup(char *opt) +static int __init numa_setup(char *opt) { if ( !strncmp(opt, "off", 3) ) numa_off = true; @@ -339,7 +344,8 @@ void __init init_cpu_to_node(void) for ( i = 0; i < nr_cpu_ids; i++ ) { - u32 apicid = x86_cpu_to_apicid[i]; + uint32_t apicid = x86_cpu_to_apicid[i]; + if ( apicid == BAD_APICID ) continue; node = apicid < MAX_LOCAL_APIC ? apicid_to_node[apicid] : NUMA_NO_NODE; @@ -380,7 +386,7 @@ static void dump_numa(unsigned char key) const struct vnuma_info *vnuma; printk("'%c' pressed -> dumping numa info (now-0x%X:%08X)\n", key, - (u32)(now >> 32), (u32)now); + (uint32_t)(now >> 32), (uint32_t)now); for_each_online_node ( i ) { @@ -507,7 +513,7 @@ static void dump_numa(unsigned char key) rcu_read_unlock(&domlist_read_lock); } -static __init int register_numa_trigger(void) +static int __init register_numa_trigger(void) { register_keyhandler('u', dump_numa, "dump NUMA info", 1); return 0; diff --git a/xen/arch/x86/srat.c b/xen/arch/x86/srat.c index ec08112..209ffc7 100644 --- a/xen/arch/x86/srat.c +++ b/xen/arch/x86/srat.c @@ -23,33 +23,33 @@ static struct acpi_table_slit *__read_mostly acpi_slit; -static nodemask_t memory_nodes_parsed __initdata; -static nodemask_t processor_nodes_parsed __initdata; -static struct node nodes[MAX_NUMNODES] __initdata; +static nodemask_t __initdata memory_nodes_parsed; +static nodemask_t __initdata processor_nodes_parsed; +static struct node __initdata nodes[MAX_NUMNODES]; struct pxm2node { - unsigned pxm; + unsigned int pxm; nodeid_t node; }; static struct pxm2node __read_mostly pxm2node[MAX_NUMNODES] = { [0 ... 
MAX_NUMNODES - 1] = {.node = NUMA_NO_NODE} }; -static unsigned node_to_pxm(nodeid_t n); +static unsigned int node_to_pxm(nodeid_t n); static int num_node_memblks; static struct node node_memblk_range[NR_NODE_MEMBLKS]; static nodeid_t memblk_nodeid[NR_NODE_MEMBLKS]; static __initdata DECLARE_BITMAP(memblk_hotplug, NR_NODE_MEMBLKS); -static inline bool node_found(unsigned idx, unsigned pxm) +static inline bool node_found(unsigned int idx, unsigned int pxm) { return ((pxm2node[idx].pxm == pxm) && (pxm2node[idx].node != NUMA_NO_NODE)); } -nodeid_t pxm_to_node(unsigned pxm) +nodeid_t pxm_to_node(unsigned int pxm) { - unsigned i; + unsigned int i; if ((pxm < ARRAY_SIZE(pxm2node)) && node_found(pxm, pxm)) return pxm2node[pxm].node; @@ -61,12 +61,12 @@ nodeid_t pxm_to_node(unsigned pxm) return NUMA_NO_NODE; } -nodeid_t setup_node(unsigned pxm) +nodeid_t setup_node(unsigned int pxm) { nodeid_t node; - unsigned idx; + unsigned int idx; static bool warned; - static unsigned nodes_found; + static unsigned int nodes_found; BUILD_BUG_ON(MAX_NUMNODES >= NUMA_NO_NODE); @@ -103,7 +103,7 @@ nodeid_t setup_node(unsigned pxm) return node; } -int valid_numa_range(u64 start, u64 end, nodeid_t node) +int valid_numa_range(paddr_t start, paddr_t end, nodeid_t node) { int i; @@ -118,7 +118,7 @@ int valid_numa_range(u64 start, u64 end, nodeid_t node) return 0; } -static __init int conflicting_memblks(u64 start, u64 end) +static int __init conflicting_memblks(paddr_t start, paddr_t end) { int i; @@ -134,7 +134,7 @@ static __init int conflicting_memblks(u64 start, u64 end) return -1; } -static __init void cutoff_node(int i, u64 start, u64 end) +static void __init cutoff_node(nodeid_t i, paddr_t start, paddr_t end) { struct node *nd = &nodes[i]; if (nd->start < start) { @@ -149,7 +149,7 @@ static __init void cutoff_node(int i, u64 start, u64 end) } } -static __init void bad_srat(void) +static void __init bad_srat(void) { int i; printk(KERN_ERR "SRAT: SRAT not used.\n"); @@ -167,13 +167,13 @@ static __init void bad_srat(void) * distance than the others. * Do some quick checks here and only use the SLIT if it passes. 
*/ -static __init int slit_valid(struct acpi_table_slit *slit) +static int __init slit_valid(struct acpi_table_slit *slit) { int i, j; int d = slit->locality_count; for (i = 0; i < d; i++) { for (j = 0; j < d; j++) { - u8 val = slit->entry[d*i + j]; + uint8_t val = slit->entry[d*i + j]; if (i == j) { if (val != 10) return 0; @@ -207,7 +207,7 @@ void __init acpi_numa_slit_init(struct acpi_table_slit *slit) void __init acpi_numa_x2apic_affinity_init(const struct acpi_srat_x2apic_cpu_affinity *pa) { - unsigned pxm; + unsigned int pxm; nodeid_t node; if (srat_disabled()) @@ -241,7 +241,7 @@ acpi_numa_x2apic_affinity_init(const struct acpi_srat_x2apic_cpu_affinity *pa) void __init acpi_numa_processor_affinity_init(const struct acpi_srat_cpu_affinity *pa) { - unsigned pxm; + unsigned int pxm; nodeid_t node; if (srat_disabled()) @@ -274,8 +274,8 @@ acpi_numa_processor_affinity_init(const struct acpi_srat_cpu_affinity *pa) void __init acpi_numa_memory_affinity_init(const struct acpi_srat_mem_affinity *ma) { - u64 start, end; - unsigned pxm; + paddr_t start, end; + unsigned int pxm; nodeid_t node; int i; @@ -401,7 +401,7 @@ static int __init nodes_cover_memory(void) void __init acpi_numa_arch_fixup(void) {} -static u64 __initdata srat_region_mask; +static uint64_t __initdata srat_region_mask; static int __init srat_parse_region(struct acpi_subtable_header *header, const unsigned long end) @@ -428,9 +428,9 @@ static int __init srat_parse_region(struct acpi_subtable_header *header, return 0; } -void __init srat_parse_regions(u64 addr) +void __init srat_parse_regions(paddr_t addr) { - u64 mask; + uint64_t mask; unsigned int i; if (acpi_disabled || acpi_numa < 0 || @@ -453,9 +453,9 @@ void __init srat_parse_regions(u64 addr) } /* Use the information discovered above to actually set up the nodes. */ -int __init acpi_scan_nodes(u64 start, u64 end) +int __init acpi_scan_nodes(paddr_t start, paddr_t end) { - int i; + unsigned int i; nodemask_t all_nodes_parsed; /* First clean up the node list */ @@ -485,7 +485,7 @@ int __init acpi_scan_nodes(u64 start, u64 end) /* Finally register nodes */ for_each_node_mask(i, all_nodes_parsed) { - u64 size = nodes[i].end - nodes[i].start; + uint64_t size = nodes[i].end - nodes[i].start; if ( size == 0 ) printk(KERN_WARNING "SRAT: Node %u has no memory. " "BIOS Bug or mis-configured hardware?\n", i); @@ -502,9 +502,9 @@ int __init acpi_scan_nodes(u64 start, u64 end) return 0; } -static unsigned node_to_pxm(nodeid_t n) +static unsigned int node_to_pxm(nodeid_t n) { - unsigned i; + unsigned int i; if ((n < ARRAY_SIZE(pxm2node)) && (pxm2node[n].node == n)) return pxm2node[n].pxm; @@ -514,10 +514,10 @@ static unsigned node_to_pxm(nodeid_t n) return 0; } -u8 __node_distance(nodeid_t a, nodeid_t b) +uint8_t __node_distance(nodeid_t a, nodeid_t b) { - unsigned index; - u8 slit_val; + unsigned int index; + uint8_t slit_val; if (!acpi_slit) return a == b ? 10 : 20; diff --git a/xen/include/asm-arm/numa.h b/xen/include/asm-arm/numa.h index a2c1a34..53f99af 100644 --- a/xen/include/asm-arm/numa.h +++ b/xen/include/asm-arm/numa.h @@ -1,7 +1,7 @@ #ifndef __ARCH_ARM_NUMA_H #define __ARCH_ARM_NUMA_H -typedef u8 nodeid_t; +typedef uint8_t nodeid_t; /* Fake one node for now. See also node_online_map. 
*/ #define cpu_to_node(cpu) 0 diff --git a/xen/include/asm-x86/acpi.h b/xen/include/asm-x86/acpi.h index 15be784..a6fad1e 100644 --- a/xen/include/asm-x86/acpi.h +++ b/xen/include/asm-x86/acpi.h @@ -104,7 +104,7 @@ extern void acpi_reserve_bootmem(void); #define ARCH_HAS_POWER_INIT 1 extern s8 acpi_numa; -extern int acpi_scan_nodes(u64 start, u64 end); +extern int acpi_scan_nodes(paddr_t start, paddr_t end); #ifdef CONFIG_ACPI_SLEEP diff --git a/xen/include/asm-x86/numa.h b/xen/include/asm-x86/numa.h index c0de57b..5e8474f 100644 --- a/xen/include/asm-x86/numa.h +++ b/xen/include/asm-x86/numa.h @@ -6,7 +6,7 @@ #define MAX_NUMNODES NR_NODES #define NR_NODE_MEMBLKS (MAX_NUMNODES * 2) -typedef u8 nodeid_t; +typedef uint8_t nodeid_t; extern int srat_rev; @@ -19,8 +19,8 @@ extern cpumask_t node_to_cpumask[]; #define node_to_cpumask(node) (node_to_cpumask[node]) struct node { - u64 start; - u64 end; + paddr_t start; + paddr_t end; }; extern int compute_hash_shift(struct node *nodes, int numnodes, @@ -39,14 +39,14 @@ extern void numa_set_node(int cpu, nodeid_t node); extern nodeid_t setup_node(unsigned int pxm); extern void srat_detect_node(int cpu); -extern void setup_node_bootmem(nodeid_t nodeid, u64 start, u64 end); +extern void setup_node_bootmem(nodeid_t nodeid, paddr_t start, paddr_t end); extern nodeid_t apicid_to_node[]; extern void init_cpu_to_node(void); /* Simple perfect hash to map pdx to node numbers */ extern int memnode_shift; extern unsigned long memnodemapsize; -extern u8 *memnodemap; +extern uint8_t *memnodemap; struct node_data { unsigned long node_start_pfn; @@ -73,10 +73,10 @@ static inline __attribute_pure__ nodeid_t phys_to_nid(paddr_t addr) #define node_end_pfn(nid) NODE_DATA(nid)->node_start_pfn + \ NODE_DATA(nid)->node_spanned_pages -extern int valid_numa_range(u64 start, u64 end, nodeid_t node); +extern int valid_numa_range(paddr_t start, paddr_t end, nodeid_t node); -void srat_parse_regions(u64 addr); -extern u8 __node_distance(nodeid_t a, nodeid_t b); +void srat_parse_regions(paddr_t addr); +extern uint8_t __node_distance(nodeid_t a, nodeid_t b); unsigned int arch_get_dma_bitsize(void); #endif -- 2.7.4 _______________________________________________ Xen-devel mailing list Xen-devel@xxxxxxxxxxxxx https://lists.xen.org/xen-devel
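
A note on the pfn_to_paddr()/paddr_to_pfn() conversions used throughout
the patch: their benefit over the open-coded "(u64)start_pfn << PAGE_SHIFT"
pattern is that the widening cast lives inside the helper, so no call site
can forget it. A minimal standalone sketch of the idea follows; the macro
bodies below are illustrative stand-ins, not copied from the Xen tree:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for Xen's types and constants. */
typedef uint64_t paddr_t;
#define PAGE_SHIFT 12

/*
 * The cast to paddr_t happens inside the macro, so a 32-bit-wide pfn
 * cannot silently overflow before the shift -- the bug class that the
 * open-coded shift-and-cast pattern invites.
 */
#define pfn_to_paddr(pfn) ((paddr_t)(pfn) << PAGE_SHIFT)
#define paddr_to_pfn(pa)  ((unsigned long)((pa) >> PAGE_SHIFT))

int main(void)
{
    unsigned long pfn = 0x100000UL;   /* the 4GiB boundary at 4K pages */
    paddr_t pa = pfn_to_paddr(pfn);

    printf("pfn %#lx -> paddr %#llx -> pfn %#lx\n",
           pfn, (unsigned long long)pa, paddr_to_pfn(pa));
    return 0;
}

Round-tripping the pfn through paddr_t and back prints the same value,
which is exactly the property the rewritten call sites in
numa_initmem_init() rely on.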
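
Similarly, the _memnodemap[BITS_PER_LONG] sizing mirrors the fallback
path in numa_initmem_init(): once memnode_shift is set to
BITS_PER_LONG - 1, the pdx-to-node lookup can only ever produce index 0
(or 1, for a pdx with its top bit set), so a table of BITS_PER_LONG
entries is comfortably sufficient. A toy model of the lookup follows;
pdx is modelled here as a plain page frame number, whereas Xen's real
paddr_to_pdx() additionally compresses holes out of the physical
address map:

#include <stdint.h>
#include <stdio.h>

#define BITS_PER_LONG 64
#define PAGE_SHIFT    12

static uint8_t memnodemap[BITS_PER_LONG];      /* all zeroes: node 0 */
static int memnode_shift = BITS_PER_LONG - 1;  /* the "numa off" fallback */

/*
 * Toy stand-in for Xen's phys_to_nid(): hash a page index down to a
 * slot of memnodemap[]. With shift == BITS_PER_LONG - 1, every
 * realistic pdx lands in slot 0, i.e. the single fake node.
 */
static uint8_t phys_to_nid(uint64_t paddr)
{
    uint64_t pdx = paddr >> PAGE_SHIFT;

    return memnodemap[pdx >> memnode_shift];
}

int main(void)
{
    printf("node of 1 GiB: %u\n", (unsigned int)phys_to_nid(1ULL << 30));
    printf("node of 1 TiB: %u\n", (unsigned int)phys_to_nid(1ULL << 40));
    return 0;
}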