
[Xen-devel] [RFC PATCH v2 01/25] x86: NUMA: Clean up: Drop trailing spaces



From: Vijaya Kumar K <Vijaya.Kumar@xxxxxxxxxx>

Fix coding style in the NUMA code: trailing spaces, hard tabs and indentation.
Also drop unused macros and functions.

Signed-off-by: Vijaya Kumar K <Vijaya.Kumar@xxxxxxxxxx>
---
 xen/arch/x86/numa.c        | 47 +++++++++++++++++++++-------------------------
 xen/arch/x86/srat.c        |  2 +-
 xen/include/asm-x86/numa.h | 43 ++++++++++++++++--------------------------
 3 files changed, 38 insertions(+), 54 deletions(-)

diff --git a/xen/arch/x86/numa.c b/xen/arch/x86/numa.c
index 6f4d438..8ee2302 100644
--- a/xen/arch/x86/numa.c
+++ b/xen/arch/x86/numa.c
@@ -1,8 +1,8 @@
-/* 
+/*
  * Generic VM initialization for x86-64 NUMA setups.
  * Copyright 2002,2003 Andi Kleen, SuSE Labs.
  * Adapted for Xen: Ryan Harper <ryanh@xxxxxxxxxx>
- */ 
+ */
 
 #include <xen/mm.h>
 #include <xen/string.h>
@@ -21,13 +21,6 @@
 static int numa_setup(char *s);
 custom_param("numa", numa_setup);
 
-#ifndef Dprintk
-#define Dprintk(x...)
-#endif
-
-/* from proto.h */
-#define round_up(x,y) ((((x)+(y))-1) & (~((y)-1)))
-
 struct node_data node_data[MAX_NUMNODES];
 
 /* Mapping from pdx to node id */
@@ -144,8 +137,9 @@ static int __init extract_lsb_from_nodes(const struct node *nodes,
     if ( nodes_used <= 1 )
         i = BITS_PER_LONG - 1;
     else
-        i = find_first_bit(&bitfield, sizeof(unsigned long)*8);
+        i = find_first_bit(&bitfield, sizeof(unsigned long) * 8);
     memnodemapsize = (memtop >> i) + 1;
+
     return i;
 }
 
@@ -173,7 +167,7 @@ int __init compute_hash_shift(struct node *nodes, int numnodes,
 }
 /* initialize NODE_DATA given nodeid and start/end */
 void __init setup_node_bootmem(nodeid_t nodeid, u64 start, u64 end)
-{ 
+{
     unsigned long start_pfn, end_pfn;
 
     start_pfn = start >> PAGE_SHIFT;
@@ -183,7 +177,7 @@ void __init setup_node_bootmem(nodeid_t nodeid, u64 start, u64 end)
     NODE_DATA(nodeid)->node_spanned_pages = end_pfn - start_pfn;
 
     node_set_online(nodeid);
-} 
+}
 
 void __init numa_init_array(void)
 {
@@ -214,7 +208,7 @@ static int __init numa_emulation(u64 start_pfn, u64 end_pfn)
 {
     int i;
     struct node nodes[MAX_NUMNODES];
-    u64 sz = ((end_pfn - start_pfn)<<PAGE_SHIFT) / numa_fake;
+    u64 sz = ((end_pfn - start_pfn) << PAGE_SHIFT) / numa_fake;
 
     /* Kludge needed for the hash function */
     if ( hweight64(sz) > 1 )
@@ -222,21 +216,22 @@ static int __init numa_emulation(u64 start_pfn, u64 end_pfn)
         u64 x = 1;
         while ( (x << 1) < sz )
             x <<= 1;
-        if ( x < sz/2 )
-            printk(KERN_ERR "Numa emulation unbalanced. Complain to maintainer\n");
+        if ( x < sz / 2 )
+            printk(KERN_ERR
+                   "Numa emulation unbalanced. Complain to maintainer\n");
         sz = x;
     }
 
     memset(&nodes,0,sizeof(nodes));
     for ( i = 0; i < numa_fake; i++ )
     {
-        nodes[i].start = (start_pfn<<PAGE_SHIFT) + i*sz;
+        nodes[i].start = (start_pfn << PAGE_SHIFT) + i * sz;
         if ( i == numa_fake - 1 )
-            sz = (end_pfn<<PAGE_SHIFT) - nodes[i].start;
+            sz = (end_pfn << PAGE_SHIFT) - nodes[i].start;
         nodes[i].end = nodes[i].start + sz;
-        printk(KERN_INFO "Faking node %d at %"PRIx64"-%"PRIx64" (%"PRIu64"MB)\n",
-               i,
-               nodes[i].start, nodes[i].end,
+        printk(KERN_INFO
+               "Faking node %d at %"PRIx64"-%"PRIx64" (%"PRIu64"MB)\n",
+               i, nodes[i].start, nodes[i].end,
                (nodes[i].end - nodes[i].start) >> 20);
         node_set_online(i);
     }
@@ -256,7 +251,7 @@ static int __init numa_emulation(u64 start_pfn, u64 end_pfn)
 #endif
 
 void __init numa_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
-{ 
+{
     int i;
 
 #ifdef CONFIG_NUMA_EMU
@@ -291,7 +286,7 @@ void __init numa_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
 void numa_add_cpu(int cpu)
 {
     cpumask_set_cpu(cpu, &node_to_cpumask[cpu_to_node(cpu)]);
-} 
+}
 
 void numa_set_node(int cpu, nodeid_t node)
 {
@@ -299,8 +294,8 @@ void numa_set_node(int cpu, nodeid_t node)
 }
 
 /* [numa=off] */
-static __init int numa_setup(char *opt) 
-{ 
+static __init int numa_setup(char *opt)
+{
     if ( !strncmp(opt,"off",3) )
         numa_off = 1;
     if ( !strncmp(opt,"on",2) )
@@ -323,7 +318,7 @@ static __init int numa_setup(char *opt)
 #endif
 
     return 1;
-} 
+}
 
 /*
  * Setup early cpu_to_node.
@@ -385,7 +380,7 @@ static void dump_numa(unsigned char key)
     const struct vnuma_info *vnuma;
 
     printk("'%c' pressed -> dumping numa info (now-0x%X:%08X)\n", key,
-           (u32)(now>>32), (u32)now);
+           (u32)(now >> 32), (u32)now);
 
     for_each_online_node ( i )
     {
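
(Not part of the patch, just context for the numa_emulation() hunk above.)
With "numa=fake=N", the span [start_pfn, end_pfn) is carved into N nodes of
equal size, the per-node size is rounded down to a power of two so the pdx
hash stays trivial, and the last node absorbs the remainder.  A minimal
user-space sketch of that arithmetic, with PAGE_SHIFT, a local node struct
and popcount64() standing in for Xen's hweight64():

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT     12
#define MAX_FAKE_NODES 8

struct fake_node { uint64_t start, end; };

static unsigned int popcount64(uint64_t x)
{
    unsigned int n = 0;

    for ( ; x; x &= x - 1 )
        n++;

    return n;
}

int main(void)
{
    uint64_t start_pfn = 0, end_pfn = 1UL << 20;   /* 4GiB of 4KiB pages */
    unsigned int nr_fake = 3, i;
    struct fake_node nodes[MAX_FAKE_NODES];
    uint64_t sz = ((end_pfn - start_pfn) << PAGE_SHIFT) / nr_fake;

    /* Round the per-node size down to a power of two. */
    if ( popcount64(sz) > 1 )
    {
        uint64_t x = 1;

        while ( (x << 1) < sz )
            x <<= 1;
        sz = x;
    }

    for ( i = 0; i < nr_fake; i++ )
    {
        nodes[i].start = (start_pfn << PAGE_SHIFT) + i * sz;
        if ( i == nr_fake - 1 )             /* last node takes the slack */
            sz = (end_pfn << PAGE_SHIFT) - nodes[i].start;
        nodes[i].end = nodes[i].start + sz;
        printf("fake node %u: %#llx-%#llx (%lluMB)\n", i,
               (unsigned long long)nodes[i].start,
               (unsigned long long)nodes[i].end,
               (unsigned long long)((nodes[i].end - nodes[i].start) >> 20));
    }

    return 0;
}

Splitting 4GiB three ways this prints 1024MB, 1024MB and 2048MB nodes; the
"Complain to maintainer" warning only fires when rounding down loses more
than half of the ideal per-node size.
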
diff --git a/xen/arch/x86/srat.c b/xen/arch/x86/srat.c
index d86783e..d270b75 100644
--- a/xen/arch/x86/srat.c
+++ b/xen/arch/x86/srat.c
@@ -7,7 +7,7 @@
  * Called from acpi_numa_init while reading the SRAT and SLIT tables.
  * Assumes all memory regions belonging to a single proximity domain
  * are in one chunk. Holes between them will be included in the node.
- * 
+ *
  * Adapted for Xen: Ryan Harper <ryanh@xxxxxxxxxx>
  */
 
diff --git a/xen/include/asm-x86/numa.h b/xen/include/asm-x86/numa.h
index 2479238..da8a459 100644
--- a/xen/include/asm-x86/numa.h
+++ b/xen/include/asm-x86/numa.h
@@ -1,4 +1,4 @@
-#ifndef _ASM_X8664_NUMA_H 
+#ifndef _ASM_X8664_NUMA_H
 #define _ASM_X8664_NUMA_H 1
 
 #include <xen/cpumask.h>
@@ -12,21 +12,20 @@ extern int srat_rev;
 extern nodeid_t      cpu_to_node[NR_CPUS];
 extern cpumask_t     node_to_cpumask[];
 
-#define cpu_to_node(cpu)               (cpu_to_node[cpu])
-#define parent_node(node)              (node)
+#define cpu_to_node(cpu)         (cpu_to_node[cpu])
+#define parent_node(node)        (node)
 #define node_to_first_cpu(node)  (__ffs(node_to_cpumask[node]))
 #define node_to_cpumask(node)    (node_to_cpumask[node])
 
-struct node { 
-       u64 start,end; 
+struct node {
+    u64 start,end;
 };
 
 extern int compute_hash_shift(struct node *nodes, int numnodes,
-                             nodeid_t *nodeids);
+                              nodeid_t *nodeids);
 extern nodeid_t pxm_to_node(unsigned int pxm);
 
 #define ZONE_ALIGN (1UL << (MAX_ORDER+PAGE_SHIFT))
-#define VIRTUAL_BUG_ON(x) 
 
 extern void numa_add_cpu(int cpu);
 extern void numa_init_array(void);
@@ -42,14 +41,8 @@ extern void setup_node_bootmem(nodeid_t nodeid, u64 start, u64 end);
 extern nodeid_t apicid_to_node[];
 extern void init_cpu_to_node(void);
 
-static inline void clear_node_cpumask(int cpu)
-{
-       cpumask_clear_cpu(cpu, &node_to_cpumask[cpu_to_node(cpu)]);
-}
-
 /* Simple perfect hash to map pdx to node numbers */
-extern int memnode_shift; 
-extern unsigned long memnodemapsize;
+extern int memnode_shift;
 extern u8 *memnodemap;
 
 struct node_data {
@@ -60,20 +53,16 @@ struct node_data {
 extern struct node_data node_data[];
 
 static inline __attribute__((pure)) nodeid_t phys_to_nid(paddr_t addr)
-{ 
-       nodeid_t nid;
-       VIRTUAL_BUG_ON((paddr_to_pdx(addr) >> memnode_shift) >= memnodemapsize);
-       nid = memnodemap[paddr_to_pdx(addr) >> memnode_shift]; 
-       VIRTUAL_BUG_ON(nid >= MAX_NUMNODES || !node_data[nid]); 
-       return nid; 
-} 
-
-#define NODE_DATA(nid)         (&(node_data[nid]))
-
-#define node_start_pfn(nid)    (NODE_DATA(nid)->node_start_pfn)
-#define node_spanned_pages(nid)        (NODE_DATA(nid)->node_spanned_pages)
+{
+    return memnodemap[paddr_to_pdx(addr) >> memnode_shift];
+}
+
+#define NODE_DATA(nid)          (&(node_data[nid]))
+
+#define node_start_pfn(nid)     (NODE_DATA(nid)->node_start_pfn)
+#define node_spanned_pages(nid) (NODE_DATA(nid)->node_spanned_pages)
 #define node_end_pfn(nid)       (NODE_DATA(nid)->node_start_pfn + \
-                                NODE_DATA(nid)->node_spanned_pages)
+                                 NODE_DATA(nid)->node_spanned_pages)
 
 extern int valid_numa_range(u64 start, u64 end, nodeid_t node);
 
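(Not part of the patch.)  The numa.h hunk reduces phys_to_nid() to a single
table lookup; the VIRTUAL_BUG_ON() checks it drops were compiled out anyway,
since that macro expanded to nothing.  A minimal user-space sketch of the
underlying perfect hash, assuming paddr_to_pdx() is the identity and using a
small fixed-size table; in the real code extract_lsb_from_nodes() picks
memnode_shift so that every 1 << memnode_shift chunk of the address space
lies entirely inside one node:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef uint8_t nodeid_t;
struct node { uint64_t start, end; };

static int memnode_shift;
static uint8_t memnodemap[64];          /* one byte per 1 << shift chunk */

/* Fill the map: chunk i belongs to the node covering that address range. */
static void populate_memnodemap(const struct node *nodes, int nr)
{
    int i;

    memset(memnodemap, 0xff, sizeof(memnodemap));
    for ( i = 0; i < nr; i++ )
    {
        uint64_t idx = nodes[i].start >> memnode_shift;
        uint64_t end = nodes[i].end >> memnode_shift;

        for ( ; idx < end; idx++ )
            memnodemap[idx] = i;
    }
}

static nodeid_t phys_to_nid(uint64_t addr)
{
    return memnodemap[addr >> memnode_shift];
}

int main(void)
{
    /* Two fake nodes on 1GiB boundaries, so a shift of 30 is enough. */
    struct node nodes[] = {
        { .start = 0x00000000, .end = 0x40000000 },   /* node 0: 0-1GiB */
        { .start = 0x40000000, .end = 0x80000000 },   /* node 1: 1-2GiB */
    };

    memnode_shift = 30;
    populate_memnodemap(nodes, 2);

    printf("0x20000000 -> node %u\n", (unsigned)phys_to_nid(0x20000000));
    printf("0x50000000 -> node %u\n", (unsigned)phys_to_nid(0x50000000));

    return 0;
}

The two lookups print node 0 and node 1 respectively; shifting the (pdx of
the) address right by memnode_shift is all phys_to_nid() has to do once the
map is populated.
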
-- 
2.7.4


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel
