
[Xen-devel] [PATCH v4 3/2] x86: Clean up TSS types and variable names



The _struct suffix on tss_struct is redundant.  Rename it to tss64,
mirroring the existing tss32 structure in HVM's task switch logic.

The init_ prefix on the per-cpu name is also misleading: there is exactly one
TSS per CPU, and it is used for the lifetime of the system, not just during
initialisation.  Drop the per_cpu__init_tss redirection and update all
callers to reference tss_page directly.

Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Wei Liu <wl@xxxxxxx>
CC: Roger Pau Monné <roger.pau@xxxxxxxxxx>

RFC, and based on my requested changes for patch 2.
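
For reference, a minimal standalone sketch (not Xen code: NR_CPUS, the static
array and main() stand in for Xen's per-CPU machinery, and the reserved
fields are named rather than left as anonymous bitfields) of the layout and
access pattern this patch moves to:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define NR_CPUS 4

/* Simplified 64-bit hardware TSS layout, matching struct tss64 below. */
struct tss64 {
    uint32_t reserved0;
    uint64_t rsp0, rsp1, rsp2;
    uint64_t reserved1;
    uint64_t ist[7];
    uint64_t reserved2;
    uint16_t reserved3;
    uint16_t bitmap;
} __attribute__((packed));

/* One page-aligned TSS per CPU, live for the lifetime of the system. */
struct tss_page {
    struct tss64 tss __attribute__((aligned(4096)));
};

static struct tss_page tss_page[NR_CPUS];

int main(void)
{
    unsigned int cpu = 0;

    /* Callers name tss_page directly; there is no init_ alias any more. */
    struct tss64 *tss = &tss_page[cpu].tss;

    tss->rsp0 = 0xffff830000100000ULL;   /* stand-in for get_stack_bottom() */

    printf("cpu%u tss at %p, rsp0=%#" PRIx64 "\n",
           cpu, (void *)tss, tss->rsp0);

    return 0;
}
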
---
 xen/arch/x86/cpu/common.c       | 8 ++++----
 xen/arch/x86/hvm/vmx/vmcs.c     | 2 +-
 xen/arch/x86/smpboot.c          | 2 +-
 xen/arch/x86/traps.c            | 8 ++++----
 xen/include/asm-x86/processor.h | 5 ++---
 5 files changed, 12 insertions(+), 13 deletions(-)

diff --git a/xen/arch/x86/cpu/common.c b/xen/arch/x86/cpu/common.c
index dc2dea4d6d..797d970d9a 100644
--- a/xen/arch/x86/cpu/common.c
+++ b/xen/arch/x86/cpu/common.c
@@ -707,7 +707,7 @@ void load_system_tables(void)
        unsigned long stack_bottom = get_stack_bottom(),
                stack_top = stack_bottom & ~(STACK_SIZE - 1);
 
-       struct tss_struct *tss = &this_cpu(init_tss);
+       struct tss64 *tss = &this_cpu(tss_page).tss;
        seg_desc_t *gdt =
                this_cpu(gdt) - FIRST_RESERVED_GDT_ENTRY;
        seg_desc_t *compat_gdt =
@@ -722,7 +722,7 @@ void load_system_tables(void)
                .limit = (IDT_ENTRIES * sizeof(idt_entry_t)) - 1,
        };
 
-       *tss = (struct tss_struct){
+       *tss = (struct tss64){
                /* Main stack for interrupts/exceptions. */
                .rsp0 = stack_bottom,
 
@@ -750,12 +750,12 @@ void load_system_tables(void)
        _set_tssldt_desc(
                gdt + TSS_ENTRY,
                (unsigned long)tss,
-               offsetof(struct tss_struct, __cacheline_filler) - 1,
+               offsetof(struct tss64, __cacheline_filler) - 1,
                SYS_DESC_tss_avail);
        _set_tssldt_desc(
                compat_gdt + TSS_ENTRY,
                (unsigned long)tss,
-               offsetof(struct tss_struct, __cacheline_filler) - 1,
+               offsetof(struct tss64, __cacheline_filler) - 1,
                SYS_DESC_tss_busy);
 
        per_cpu(full_gdt_loaded, cpu) = false;
diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
index 098613822a..c438befe35 100644
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -796,7 +796,7 @@ static void vmx_set_host_env(struct vcpu *v)
               (unsigned long)(this_cpu(gdt) - FIRST_RESERVED_GDT_ENTRY));
     __vmwrite(HOST_IDTR_BASE, (unsigned long)idt_tables[cpu]);
 
-    __vmwrite(HOST_TR_BASE, (unsigned long)&per_cpu(init_tss, cpu));
+    __vmwrite(HOST_TR_BASE, (unsigned long)&per_cpu(tss_page, cpu).tss);
 
     __vmwrite(HOST_SYSENTER_ESP, get_stack_bottom());
 
diff --git a/xen/arch/x86/smpboot.c b/xen/arch/x86/smpboot.c
index 8d5fef0012..4833891bda 100644
--- a/xen/arch/x86/smpboot.c
+++ b/xen/arch/x86/smpboot.c
@@ -825,7 +825,7 @@ static int setup_cpu_root_pgt(unsigned int cpu)
     if ( !rc )
         rc = clone_mapping(idt_tables[cpu], rpt);
     if ( !rc )
-        rc = clone_mapping(&per_cpu(init_tss, cpu), rpt);
+        rc = clone_mapping(&per_cpu(tss_page, cpu).tss, rpt);
     if ( !rc )
         rc = clone_mapping((void *)per_cpu(stubs.addr, cpu), rpt);
 
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index 252fe6d792..b143def51c 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -565,7 +565,7 @@ void show_stack_overflow(unsigned int cpu, const struct cpu_user_regs *regs)
 
     printk("Valid stack range: %p-%p, sp=%p, tss.rsp0=%p\n",
            (void *)esp_top, (void *)esp_bottom, (void *)esp,
-           (void *)per_cpu(init_tss, cpu).rsp0);
+           (void *)per_cpu(tss_page, cpu).tss.rsp0);
 
     /*
      * Trigger overflow trace if %esp is anywhere within the guard page, or
@@ -1903,7 +1903,7 @@ static void __init set_intr_gate(unsigned int n, void *addr)
 
 void load_TR(void)
 {
-    struct tss_struct *tss = &this_cpu(init_tss);
+    struct tss64 *tss = &this_cpu(tss_page).tss;
     struct desc_ptr old_gdt, tss_gdt = {
         .base = (long)(this_cpu(gdt) - FIRST_RESERVED_GDT_ENTRY),
         .limit = LAST_RESERVED_GDT_BYTE
@@ -1912,12 +1912,12 @@ void load_TR(void)
     _set_tssldt_desc(
         this_cpu(gdt) + TSS_ENTRY - FIRST_RESERVED_GDT_ENTRY,
         (unsigned long)tss,
-        offsetof(struct tss_struct, __cacheline_filler) - 1,
+        offsetof(struct tss64, __cacheline_filler) - 1,
         SYS_DESC_tss_avail);
     _set_tssldt_desc(
         this_cpu(compat_gdt) + TSS_ENTRY - FIRST_RESERVED_GDT_ENTRY,
         (unsigned long)tss,
-        offsetof(struct tss_struct, __cacheline_filler) - 1,
+        offsetof(struct tss64, __cacheline_filler) - 1,
         SYS_DESC_tss_busy);
 
     /* Switch to non-compat GDT (which has B bit clear) to execute LTR. */
diff --git a/xen/include/asm-x86/processor.h b/xen/include/asm-x86/processor.h
index 6f180775f4..4c2710ca2b 100644
--- a/xen/include/asm-x86/processor.h
+++ b/xen/include/asm-x86/processor.h
@@ -411,7 +411,7 @@ static always_inline void __mwait(unsigned long eax, unsigned long ecx)
 #define IOBMP_BYTES             8192
 #define IOBMP_INVALID_OFFSET    0x8000
 
-struct __packed tss_struct {
+struct __packed tss64 {
     uint32_t :32;
     uint64_t rsp0, rsp1, rsp2;
     uint64_t :64;
@@ -426,10 +426,9 @@ struct __packed tss_struct {
     uint8_t __cacheline_filler[24];
 };
 struct tss_page {
-    struct tss_struct __aligned(PAGE_SIZE) tss;
+    struct tss64 __aligned(PAGE_SIZE) tss;
 };
 DECLARE_PER_CPU(struct tss_page, tss_page);
-#define per_cpu__init_tss get_per_cpu_var(tss_page.tss)
 
 #define IST_NONE 0UL
 #define IST_DF   1UL
-- 
2.11.0

