
[Xen-devel] [PATCH v7 02/14] x86: improve psr scheduling code



Switching the RMID from the previous vcpu to the next one requires only a
single write to MSR_IA32_PSR_ASSOC: writing the next vcpu's value is enough,
and there is no need to write '0' first. The idle domain has its RMID set to
0, and since the MSR is already updated lazily, the idle domain can be
switched to just like any other.
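
In essence, the context-switch path reduces to the pattern below (a
condensed sketch of the psr_ctxt_switch_to() added by this patch; the
per-CPU software copy of the MSR is what makes the write conditional):

    struct psr_assoc {
        uint64_t val;    /* software copy of MSR_IA32_PSR_ASSOC */
    };

    void psr_ctxt_switch_to(struct domain *d)
    {
        struct psr_assoc *psra = &this_cpu(psr_assoc);
        uint64_t reg = psra->val;

        if ( psr_cmt_enabled() )
            reg = (reg & ~rmid_mask) | (d->arch.psr_rmid & rmid_mask);

        /*
         * The idle domain's psr_rmid is 0, so it needs no special
         * casing: the MSR is written only when the RMID changes.
         */
        if ( reg != psra->val )
        {
            wrmsrl(MSR_IA32_PSR_ASSOC, reg);
            psra->val = reg;
        }
    }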

Also move the initialization of the per-CPU variable used for the lazy
update from the context switch path to CPU startup (the CPU_STARTING
notifier).
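
The per-CPU copy is now seeded from the hardware value once, when a CPU
comes online (condensed from the hunks below; the boot CPU is handled
directly in psr_presmp_init()):

    static void psr_cpu_init(void)
    {
        /* Seed the software copy with the current hardware value. */
        if ( psr_cmt_enabled() )
            rdmsrl(MSR_IA32_PSR_ASSOC, this_cpu(psr_assoc).val);
    }

    static int cpu_callback(struct notifier_block *nfb,
                            unsigned long action, void *hcpu)
    {
        if ( action == CPU_STARTING )
            psr_cpu_init();
        return NOTIFY_DONE;
    }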

Signed-off-by: Chao Peng <chao.p.peng@xxxxxxxxxxxxxxx>
Reviewed-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Reviewed-by: Dario Faggioli <dario.faggioli@xxxxxxxxxx>
---
Changes in v6:
* Blank line fix.
Changes in v5:
* use this_cpu() rather than per_cpu().
Changes in v4:
* Move psr_assoc_reg_read/psr_assoc_reg_write into psr_ctxt_switch_to.
* Use 0 instead of smp_processor_id() for boot cpu.
* Add cpu parameter to psr_assoc_init.
Changes in v2:
* Move initialization for psr_assoc from context switch to CPU_STARTING.
---
 xen/arch/x86/domain.c     |  7 ++---
 xen/arch/x86/psr.c        | 71 +++++++++++++++++++++++++++++++++--------------
 xen/include/asm-x86/psr.h |  2 +-
 3 files changed, 53 insertions(+), 27 deletions(-)

diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index fcea94b..c26c732 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -1444,8 +1444,6 @@ static void __context_switch(void)
     {
         memcpy(&p->arch.user_regs, stack_regs, CTXT_SWITCH_STACK_BYTES);
         vcpu_save_fpu(p);
-        if ( psr_cmt_enabled() )
-            psr_assoc_rmid(0);
         p->arch.ctxt_switch_from(p);
     }
 
@@ -1470,11 +1468,10 @@ static void __context_switch(void)
         }
         vcpu_restore_fpu_eager(n);
         n->arch.ctxt_switch_to(n);
-
-        if ( psr_cmt_enabled() && n->domain->arch.psr_rmid > 0 )
-            psr_assoc_rmid(n->domain->arch.psr_rmid);
     }
 
+    psr_ctxt_switch_to(n->domain);
+
     gdt = !is_pv_32on64_vcpu(n) ? per_cpu(gdt_table, cpu) :
                                   per_cpu(compat_gdt_table, cpu);
     if ( need_full_gdt(n) )
diff --git a/xen/arch/x86/psr.c b/xen/arch/x86/psr.c
index 344de3c..2490d22 100644
--- a/xen/arch/x86/psr.c
+++ b/xen/arch/x86/psr.c
@@ -22,7 +22,6 @@
 
 struct psr_assoc {
     uint64_t val;
-    bool_t initialized;
 };
 
 struct psr_cmt *__read_mostly psr_cmt;
@@ -122,14 +121,6 @@ static void __init init_psr_cmt(unsigned int rmid_max)
     printk(XENLOG_INFO "Cache Monitoring Technology enabled\n");
 }
 
-static int __init init_psr(void)
-{
-    if ( (opt_psr & PSR_CMT) && opt_rmid_max )
-        init_psr_cmt(opt_rmid_max);
-    return 0;
-}
-__initcall(init_psr);
-
 /* Called with domain lock held, no psr specific lock needed */
 int psr_alloc_rmid(struct domain *d)
 {
@@ -175,27 +166,65 @@ void psr_free_rmid(struct domain *d)
     d->arch.psr_rmid = 0;
 }
 
-void psr_assoc_rmid(unsigned int rmid)
+static inline void psr_assoc_init(void)
 {
-    uint64_t val;
-    uint64_t new_val;
     struct psr_assoc *psra = &this_cpu(psr_assoc);
 
-    if ( !psra->initialized )
-    {
+    if ( psr_cmt_enabled() )
         rdmsrl(MSR_IA32_PSR_ASSOC, psra->val);
-        psra->initialized = 1;
-    }
-    val = psra->val;
+}
+
+static inline void psr_assoc_rmid(uint64_t *reg, unsigned int rmid)
+{
+    *reg = (*reg & ~rmid_mask) | (rmid & rmid_mask);
+}
+
+void psr_ctxt_switch_to(struct domain *d)
+{
+    struct psr_assoc *psra = &this_cpu(psr_assoc);
+    uint64_t reg = psra->val;
+
+    if ( psr_cmt_enabled() )
+        psr_assoc_rmid(&reg, d->arch.psr_rmid);
 
-    new_val = (val & ~rmid_mask) | (rmid & rmid_mask);
-    if ( val != new_val )
+    if ( reg != psra->val )
     {
-        wrmsrl(MSR_IA32_PSR_ASSOC, new_val);
-        psra->val = new_val;
+        wrmsrl(MSR_IA32_PSR_ASSOC, reg);
+        psra->val = reg;
     }
 }
 
+static void psr_cpu_init(void)
+{
+    psr_assoc_init();
+}
+
+static int cpu_callback(
+    struct notifier_block *nfb, unsigned long action, void *hcpu)
+{
+    if ( action == CPU_STARTING )
+        psr_cpu_init();
+
+    return NOTIFY_DONE;
+}
+
+static struct notifier_block cpu_nfb = {
+    .notifier_call = cpu_callback
+};
+
+static int __init psr_presmp_init(void)
+{
+    if ( (opt_psr & PSR_CMT) && opt_rmid_max )
+        init_psr_cmt(opt_rmid_max);
+
+    psr_cpu_init();
+    if ( psr_cmt_enabled() )
+        register_cpu_notifier(&cpu_nfb);
+
+    return 0;
+}
+presmp_initcall(psr_presmp_init);
+
 /*
  * Local variables:
  * mode: C
diff --git a/xen/include/asm-x86/psr.h b/xen/include/asm-x86/psr.h
index c6076e9..12d593b 100644
--- a/xen/include/asm-x86/psr.h
+++ b/xen/include/asm-x86/psr.h
@@ -46,7 +46,7 @@ static inline bool_t psr_cmt_enabled(void)
 
 int psr_alloc_rmid(struct domain *d);
 void psr_free_rmid(struct domain *d);
-void psr_assoc_rmid(unsigned int rmid);
+void psr_ctxt_switch_to(struct domain *d);
 
 #endif /* __ASM_PSR_H__ */
 
-- 
1.9.1

