
[Xen-changelog] [xen staging] x86: optimize loading of GDT at context switch



commit 1464c4cdd719fa5ab08b710218a866780a694dca
Author:     Juergen Gross <jgross@xxxxxxxx>
AuthorDate: Fri Jul 26 10:43:42 2019 +0200
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Fri Jul 26 10:43:42 2019 +0200

    x86: optimize loading of GDT at context switch
    
    Instead of deciding dynamically whether the previous vcpu was using the
    full or the default GDT, add a per-cpu variable for that purpose. This
    also removes the need to test twice whether the vcpu_ids differ.
    
    This change improves performance by 0.5% - 1% on my test machine when
    doing parallel compilation.
    
    Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
    Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
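
For illustration, below is a minimal standalone sketch of the caching pattern
this patch introduces: a per-CPU boolean records whether the full GDT is
currently loaded and is updated only at the two places the GDT is actually
reloaded, so the context-switch path no longer re-derives that state from the
previous vcpu. This is not Xen code; NR_SKETCH_CPUS, sketch_lgdt(),
struct sketch_vcpu and its needs_full_gdt field are hypothetical stand-ins
for Xen's per-CPU, vcpu, and descriptor-table machinery.

/* Hypothetical stand-ins for Xen's per-CPU and GDT machinery. */
#include <stdbool.h>
#include <stdio.h>

#define NR_SKETCH_CPUS 4

/* Stands in for DEFINE_PER_CPU(bool, full_gdt_loaded). */
static bool full_gdt_loaded[NR_SKETCH_CPUS];

struct sketch_vcpu {
    unsigned int vcpu_id;
    bool needs_full_gdt;            /* stands in for need_full_gdt(domain) */
};

static void sketch_lgdt(const char *which, unsigned int cpu)
{
    printf("cpu%u: lgdt(%s GDT)\n", cpu, which);
}

static void load_full_gdt(const struct sketch_vcpu *v, unsigned int cpu)
{
    (void)v;                        /* the real code builds gdt_desc from v */
    sketch_lgdt("full", cpu);
    full_gdt_loaded[cpu] = true;    /* record what is now live */
}

static void load_default_gdt(unsigned int cpu)
{
    sketch_lgdt("default", cpu);
    full_gdt_loaded[cpu] = false;
}

/* Mirrors the reworked GDT handling in __context_switch(). */
static void context_switch_gdt(const struct sketch_vcpu *p,
                               const struct sketch_vcpu *n,
                               unsigned int cpu)
{
    /* Fall back to the default GDT only if a full one is live but unusable. */
    if ( full_gdt_loaded[cpu] &&
         ((p->vcpu_id != n->vcpu_id) || !n->needs_full_gdt) )
        load_default_gdt(cpu);

    /* Load the full GDT only if it is needed and not already loaded. */
    if ( n->needs_full_gdt && !full_gdt_loaded[cpu] )
        load_full_gdt(n, cpu);
}

int main(void)
{
    struct sketch_vcpu a = { .vcpu_id = 0, .needs_full_gdt = true };
    struct sketch_vcpu b = { .vcpu_id = 1, .needs_full_gdt = false };

    context_switch_gdt(&b, &a, 0);  /* loads the full GDT */
    context_switch_gdt(&a, &b, 0);  /* drops back to the default GDT */
    return 0;
}

In the sketch, when the same vcpu_id is switched in again with a suitable
full GDT still live, neither branch fires and no lgdt is issued.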
---
 xen/arch/x86/cpu/common.c  |  3 +++
 xen/arch/x86/domain.c      | 13 ++++++++-----
 xen/include/asm-x86/desc.h |  1 +
 3 files changed, 12 insertions(+), 5 deletions(-)

diff --git a/xen/arch/x86/cpu/common.c b/xen/arch/x86/cpu/common.c
index 1db96d959c..7478e21177 100644
--- a/xen/arch/x86/cpu/common.c
+++ b/xen/arch/x86/cpu/common.c
@@ -49,6 +49,8 @@ unsigned int vaddr_bits __read_mostly = VADDR_BITS;
 static unsigned int cleared_caps[NCAPINTS];
 static unsigned int forced_caps[NCAPINTS];
 
+DEFINE_PER_CPU(bool, full_gdt_loaded);
+
 void __init setup_clear_cpu_cap(unsigned int cap)
 {
        const uint32_t *dfs;
@@ -756,6 +758,7 @@ void load_system_tables(void)
                offsetof(struct tss_struct, __cacheline_filler) - 1,
                SYS_DESC_tss_busy);
 
+       per_cpu(full_gdt_loaded, cpu) = false;
        lgdt(&gdtr);
        lidt(&idtr);
        ltr(TSS_ENTRY << 3);
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index ea55160887..5933b3f51b 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -1670,7 +1670,7 @@ static void update_xen_slot_in_full_gdt(const struct vcpu *v, unsigned int cpu)
                                    : per_cpu(compat_gdt_table_l1e, cpu));
 }
 
-static void load_full_gdt(const struct vcpu *v)
+static void load_full_gdt(const struct vcpu *v, unsigned int cpu)
 {
     struct desc_ptr gdt_desc = {
         .limit = LAST_RESERVED_GDT_BYTE,
@@ -1678,6 +1678,8 @@ static void load_full_gdt(const struct vcpu *v)
     };
 
     lgdt(&gdt_desc);
+
+    per_cpu(full_gdt_loaded, cpu) = true;
 }
 
 static void load_default_gdt(unsigned int cpu)
@@ -1689,6 +1691,8 @@ static void load_default_gdt(unsigned int cpu)
     };
 
     lgdt(&gdt_desc);
+
+    per_cpu(full_gdt_loaded, cpu) = false;
 }
 
 static void __context_switch(void)
@@ -1740,7 +1744,7 @@ static void __context_switch(void)
     if ( need_full_gdt(nd) )
         update_xen_slot_in_full_gdt(n, cpu);
 
-    if ( need_full_gdt(pd) &&
+    if ( per_cpu(full_gdt_loaded, cpu) &&
          ((p->vcpu_id != n->vcpu_id) || !need_full_gdt(nd)) )
         load_default_gdt(cpu);
 
@@ -1753,9 +1757,8 @@ static void __context_switch(void)
         svm_load_segs(0, 0, 0, 0, 0, 0, 0);
 #endif
 
-    if ( need_full_gdt(nd) &&
-         ((p->vcpu_id != n->vcpu_id) || !need_full_gdt(pd)) )
-        load_full_gdt(n);
+    if ( need_full_gdt(nd) && !per_cpu(full_gdt_loaded, cpu) )
+        load_full_gdt(n, cpu);
 
     if ( pd != nd )
         cpumask_clear_cpu(cpu, pd->dirty_cpumask);
diff --git a/xen/include/asm-x86/desc.h b/xen/include/asm-x86/desc.h
index e565727dc0..c011c03ae2 100644
--- a/xen/include/asm-x86/desc.h
+++ b/xen/include/asm-x86/desc.h
@@ -210,6 +210,7 @@ DECLARE_PER_CPU(l1_pgentry_t, gdt_table_l1e);
 extern seg_desc_t boot_cpu_compat_gdt_table[];
 DECLARE_PER_CPU(seg_desc_t *, compat_gdt_table);
 DECLARE_PER_CPU(l1_pgentry_t, compat_gdt_table_l1e);
+DECLARE_PER_CPU(bool, full_gdt_loaded);
 
 extern void load_TR(void);
 
--
generated by git-patchbot for /home/xen/git/xen.git#staging

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/xen-changelog

 

