[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Xen-devel] [PATCH v1 7/7] Rename sections for compatibility with -ffunction-sections -fdata-sections



Hi Ross,

On 06/05/16 16:48, Ross Lagerwall wrote:
When building with -ffunction-sections -fdata-sections, it will generate
section names like .text.show_handlers and .data.payload_list. These
sections are in the same namespace as the special sections that Xen
uses, such as .text.kexec and .data.schedulers. To prevent conflicts,
prefix Xen's special sections with an extra period.

The idea for this was taken from a similar patch series applied to the
Linux kernel by the Ksplice folks.

Signed-off-by: Ross Lagerwall <ross.lagerwall@xxxxxxxxxx>

For the ARM parts:

Acked-by: Julien Grall <julien.grall@xxxxxxx>

Regards,

---
  xen/arch/arm/xen.lds.S            | 14 +++++++-------
  xen/arch/x86/boot/x86_64.S        |  2 +-
  xen/arch/x86/hvm/hvm.c            |  2 +-
  xen/arch/x86/mm.c                 |  4 ++--
  xen/arch/x86/setup.c              |  2 +-
  xen/arch/x86/x86_64/kexec_reloc.S |  2 +-
  xen/arch/x86/xen.lds.S            | 18 +++++++++---------
  xen/include/asm-arm/cache.h       |  2 +-
  xen/include/asm-arm/percpu.h      |  2 +-
  xen/include/asm-x86/cache.h       |  2 +-
  xen/include/asm-x86/percpu.h      |  2 +-
  xen/include/xen/sched-if.h        |  2 +-
  12 files changed, 27 insertions(+), 27 deletions(-)

diff --git a/xen/arch/arm/xen.lds.S b/xen/arch/arm/xen.lds.S
index 1f010bd..ec6c389 100644
--- a/xen/arch/arm/xen.lds.S
+++ b/xen/arch/arm/xen.lds.S
@@ -74,11 +74,11 @@ SECTIONS

    .data : {                    /* Data */
         . = ALIGN(PAGE_SIZE);
-       *(.data.page_aligned)
+       *(.data..page_aligned)
         *(.data)
         . = ALIGN(8);
         __start_schedulers_array = .;
-       *(.data.schedulers)
+       *(.data..schedulers)
         __end_schedulers_array = .;
         *(.data.rel)
         *(.data.rel.*)
@@ -97,7 +97,7 @@ SECTIONS
         *(.ex_table.pre)
         __stop___pre_ex_table = .;

-       *(.data.read_mostly)
+       *(.data..read_mostly)
         *(.data.rel.ro)
         *(.data.rel.ro.*)
    } :text
@@ -173,15 +173,15 @@ SECTIONS

    .bss : {                     /* BSS */
         __bss_start = .;
-       *(.bss.stack_aligned)
+       *(.bss..stack_aligned)
         . = ALIGN(PAGE_SIZE);
-       *(.bss.page_aligned)
+       *(.bss..page_aligned)
         *(.bss)
         . = ALIGN(SMP_CACHE_BYTES);
         __per_cpu_start = .;
-       *(.bss.percpu)
+       *(.bss..percpu)
         . = ALIGN(SMP_CACHE_BYTES);
-       *(.bss.percpu.read_mostly)
+       *(.bss..percpu.read_mostly)
         . = ALIGN(SMP_CACHE_BYTES);
         __per_cpu_data_end = .;
         __bss_end = .;
diff --git a/xen/arch/x86/boot/x86_64.S b/xen/arch/x86/boot/x86_64.S
index 9ab9231..12b0ba8 100644
--- a/xen/arch/x86/boot/x86_64.S
+++ b/xen/arch/x86/boot/x86_64.S
@@ -51,7 +51,7 @@ GLOBAL(gdt_descr)
  GLOBAL(stack_start)
          .quad   cpu0_stack

-        .section .data.page_aligned, "aw", @progbits
+        .section .data..page_aligned, "aw", @progbits
          .align PAGE_SIZE, 0
  GLOBAL(boot_cpu_gdt_table)
          .quad 0x0000000000000000     /* unused */
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 82e2ed1..3eb2369 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -88,7 +88,7 @@ struct hvm_function_table hvm_funcs __read_mostly;
   * the hardware domain which needs a more permissive one.
   */
  #define HVM_IOBITMAP_SIZE (3 * PAGE_SIZE)
-unsigned long __section(".bss.page_aligned")
+unsigned long __section(".bss..page_aligned")
      hvm_io_bitmap[HVM_IOBITMAP_SIZE / BYTES_PER_LONG];

  /* Xen command-line option to enable HAP */
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 2bb920b..5b59f7d 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -125,7 +125,7 @@
  #include <asm/pci.h>

  /* Mapping of the fixmap space needed early. */
-l1_pgentry_t __section(".bss.page_aligned") l1_fixmap[L1_PAGETABLE_ENTRIES];
+l1_pgentry_t __section(".bss..page_aligned") l1_fixmap[L1_PAGETABLE_ENTRIES];

  #define MEM_LOG(_f, _a...) gdprintk(XENLOG_WARNING , _f "\n" , ## _a)

@@ -589,7 +589,7 @@ static inline void guest_get_eff_kern_l1e(struct vcpu *v, 
unsigned long addr,
      TOGGLE_MODE();
  }

-const char __section(".bss.page_aligned.const") zero_page[PAGE_SIZE];
+const char __section(".bss..page_aligned.const") zero_page[PAGE_SIZE];

  static void invalidate_shadow_ldt(struct vcpu *v, int flush)
  {
diff --git a/xen/arch/x86/setup.c b/xen/arch/x86/setup.c
index 5029568..7d30945 100644
--- a/xen/arch/x86/setup.c
+++ b/xen/arch/x86/setup.c
@@ -103,7 +103,7 @@ unsigned long __read_mostly xen_virt_end;

  DEFINE_PER_CPU(struct tss_struct, init_tss);

-char __section(".bss.stack_aligned") cpu0_stack[STACK_SIZE];
+char __section(".bss..stack_aligned") cpu0_stack[STACK_SIZE];

  struct cpuinfo_x86 __read_mostly boot_cpu_data = { 0, 0, 0, 0, -1 };

diff --git a/xen/arch/x86/x86_64/kexec_reloc.S 
b/xen/arch/x86/x86_64/kexec_reloc.S
index 85ab602..8ce87ec 100644
--- a/xen/arch/x86/x86_64/kexec_reloc.S
+++ b/xen/arch/x86/x86_64/kexec_reloc.S
@@ -18,7 +18,7 @@
  #include <asm/page.h>
  #include <asm/machine_kexec.h>

-        .section .text.kexec, "ax", @progbits
+        .section .text..kexec, "ax", @progbits
          .align PAGE_SIZE
          .code64

diff --git a/xen/arch/x86/xen.lds.S b/xen/arch/x86/xen.lds.S
index b14bcd2..aa9467d 100644
--- a/xen/arch/x86/xen.lds.S
+++ b/xen/arch/x86/xen.lds.S
@@ -52,7 +52,7 @@ SECTIONS
         *(.text.cold)
         *(.text.unlikely)
         *(.fixup)
-       *(.text.kexec)
+       *(.text..kexec)
         *(.gnu.warning)
         _etext = .;             /* End of text section */
    } :text = 0x9090
@@ -207,11 +207,11 @@ SECTIONS

    __2M_rwdata_start = .;       /* Start of 2M superpages, mapped RW. */
    . = ALIGN(SMP_CACHE_BYTES);
-  .data.read_mostly : {
-       *(.data.read_mostly)
+  .data..read_mostly : {
+       *(.data..read_mostly)
         . = ALIGN(8);
         __start_schedulers_array = .;
-       *(.data.schedulers)
+       *(.data..schedulers)
         __end_schedulers_array = .;
         *(.data.rel.ro)
         *(.data.rel.ro.*)
@@ -219,7 +219,7 @@ SECTIONS

    .data : {                    /* Data */
         . = ALIGN(PAGE_SIZE);
-       *(.data.page_aligned)
+       *(.data..page_aligned)
         *(.data)
         *(.data.rel)
         *(.data.rel.*)
@@ -229,15 +229,15 @@ SECTIONS
    .bss : {                     /* BSS */
         . = ALIGN(STACK_SIZE);
         __bss_start = .;
-       *(.bss.stack_aligned)
+       *(.bss..stack_aligned)
         . = ALIGN(PAGE_SIZE);
-       *(.bss.page_aligned*)
+       *(.bss..page_aligned*)
         *(.bss)
         . = ALIGN(SMP_CACHE_BYTES);
         __per_cpu_start = .;
-       *(.bss.percpu)
+       *(.bss..percpu)
         . = ALIGN(SMP_CACHE_BYTES);
-       *(.bss.percpu.read_mostly)
+       *(.bss..percpu.read_mostly)
         . = ALIGN(SMP_CACHE_BYTES);
         __per_cpu_data_end = .;
         __bss_end = .;
diff --git a/xen/include/asm-arm/cache.h b/xen/include/asm-arm/cache.h
index 2de6564..7723b06 100644
--- a/xen/include/asm-arm/cache.h
+++ b/xen/include/asm-arm/cache.h
@@ -7,7 +7,7 @@
  #define L1_CACHE_SHIFT  (CONFIG_ARM_L1_CACHE_SHIFT)
  #define L1_CACHE_BYTES  (1 << L1_CACHE_SHIFT)

-#define __read_mostly __section(".data.read_mostly")
+#define __read_mostly __section(".data..read_mostly")

  #endif
  /*
diff --git a/xen/include/asm-arm/percpu.h b/xen/include/asm-arm/percpu.h
index 7968532..f4d9628 100644
--- a/xen/include/asm-arm/percpu.h
+++ b/xen/include/asm-arm/percpu.h
@@ -19,7 +19,7 @@ void percpu_init_areas(void);

  /* Separate out the type, so (int[3], foo) works. */
  #define __DEFINE_PER_CPU(type, name, suffix)                    \
-    __section(".bss.percpu" #suffix)                            \
+    __section(".bss..percpu" #suffix)                            \
      __typeof__(type) per_cpu_##name

  #define per_cpu(var, cpu)  \
diff --git a/xen/include/asm-x86/cache.h b/xen/include/asm-x86/cache.h
index f4a08e7..5aedb85 100644
--- a/xen/include/asm-x86/cache.h
+++ b/xen/include/asm-x86/cache.h
@@ -10,6 +10,6 @@
  #define L1_CACHE_SHIFT        (CONFIG_X86_L1_CACHE_SHIFT)
  #define L1_CACHE_BYTES        (1 << L1_CACHE_SHIFT)

-#define __read_mostly __section(".data.read_mostly")
+#define __read_mostly __section(".data..read_mostly")

  #endif
diff --git a/xen/include/asm-x86/percpu.h b/xen/include/asm-x86/percpu.h
index 51562b9..5d442c3 100644
--- a/xen/include/asm-x86/percpu.h
+++ b/xen/include/asm-x86/percpu.h
@@ -9,7 +9,7 @@ void percpu_init_areas(void);

  /* Separate out the type, so (int[3], foo) works. */
  #define __DEFINE_PER_CPU(type, name, suffix)                    \
-    __section(".bss.percpu" #suffix)                            \
+    __section(".bss..percpu" #suffix)                            \
      __typeof__(type) per_cpu_##name

  /* var is in discarded region: offset to particular copy we want */
diff --git a/xen/include/xen/sched-if.h b/xen/include/xen/sched-if.h
index bc0e794..78c6462 100644
--- a/xen/include/xen/sched-if.h
+++ b/xen/include/xen/sched-if.h
@@ -175,7 +175,7 @@ struct scheduler {
  };

  #define REGISTER_SCHEDULER(x) static const struct scheduler *x##_entry \
-  __used_section(".data.schedulers") = &x;
+  __used_section(".data..schedulers") = &x;

  struct cpupool
  {


--
Julien Grall

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.