[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Xen-ia64-devel][PATCH]Build new infrastructure for fast fault handling path.



Hi Anthony.

I tracked it down.
The key is perfc=y and perfc_arrays=y. I suppose you didn't enable those.
I can boot VTi domain without panic with the following patch.

While debugging, I noticed that switch_mm_mode() also calls
panic_domain() which isn't usually called. If panic_domain() is called
on the fast path, the vmm itself would panic.
Could you refine it?
And could you add comments to the new fast C routine to warn?

diff -r 2ce591cbc564 xen/arch/ia64/vmx/vmx_phy_mode.c
--- a/xen/arch/ia64/vmx/vmx_phy_mode.c  Wed May 14 12:31:37 2008 +0900
+++ b/xen/arch/ia64/vmx/vmx_phy_mode.c  Wed May 14 14:15:36 2008 +0900
@@ -180,7 +180,7 @@
        ia64_srlz_d();
 }
 
-void
+static void
 switch_to_physical_rid(VCPU *vcpu)
 {
     u64 psr;
@@ -208,7 +208,7 @@
     return;
 }
 
-void
+static void
 switch_to_virtual_rid(VCPU *vcpu)
 {
     u64 psr;
@@ -228,12 +228,13 @@
     return mm_switch_table[MODE_IND(opsr)][MODE_IND(npsr)];
 }
 
-void
-switch_mm_mode(VCPU *vcpu, IA64_PSR old_psr, IA64_PSR new_psr)
+static void
+__switch_mm_mode(VCPU *vcpu, IA64_PSR old_psr, IA64_PSR new_psr, int is_fast)
 {
     int act;
     act = mm_switch_action(old_psr, new_psr);
-    perfc_incra(vmx_switch_mm_mode, act);
+    if (unlikely(!is_fast))
+        perfc_incra(vmx_switch_mm_mode, act);
     switch (act) {
     case SW_2P_DT:
         vcpu->arch.arch_vmx.mmu_mode = VMX_MMU_PHY_DT;
@@ -252,8 +253,9 @@
         switch_to_virtual_rid(vcpu);
         break;
     case SW_SELF:
-//        printk("Switch to self-0x%lx!!! MM mode doesn't change...\n",
-//            old_psr.val);
+        if (unlikely(!is_fast))
+            printk("Switch to self-0x%lx!!! MM mode doesn't change...\n",
+                   old_psr.val);
         break;
     case SW_NOP:
 //        printk("No action required for mode transition: (0x%lx -> 0x%lx)\n",
@@ -267,6 +269,21 @@
         break;
     }
     return;
+}
+
+void
+switch_mm_mode(VCPU *vcpu, IA64_PSR old_psr, IA64_PSR new_psr)
+{
+    __switch_mm_mode(vcpu, old_psr, new_psr, 0);
+}
+
+void
+switch_mm_mode_fast(VCPU *vcpu, IA64_PSR old_psr, IA64_PSR new_psr)
+{
+    /* When fast path, psr.ic = 0, psr.i = 0, psr.bn = 0
+     * so that no tlb miss is allowed.
+     */
+    __switch_mm_mode(vcpu, old_psr, new_psr, 1);
 }
 
 void
diff -r 2ce591cbc564 xen/arch/ia64/vmx/vmx_vcpu.c
--- a/xen/arch/ia64/vmx/vmx_vcpu.c      Wed May 14 12:31:37 2008 +0900
+++ b/xen/arch/ia64/vmx/vmx_vcpu.c      Wed May 14 14:15:36 2008 +0900
@@ -298,7 +298,7 @@
 
     ia64_setreg(_IA64_REG_CR_IPSR, mipsr);
 
-    switch_mm_mode(vcpu, (IA64_PSR)old_vpsr, (IA64_PSR)new_vpsr);
+    switch_mm_mode_fast(vcpu, (IA64_PSR)old_vpsr, (IA64_PSR)new_vpsr);
 }
 
 
@@ -348,7 +348,7 @@
 
     ia64_setreg(_IA64_REG_CR_IIP, VCPU(vcpu, iip));
 
-    switch_mm_mode(vcpu, (IA64_PSR)vpsr, (IA64_PSR)vipsr);
+    switch_mm_mode_fast(vcpu, (IA64_PSR)vpsr, (IA64_PSR)vipsr);
 }
 
 
@@ -369,7 +369,7 @@
     mipsr |= imm24 & (~IA64_PSR_PP);
     ia64_setreg(_IA64_REG_CR_IPSR, mipsr);
 
-    switch_mm_mode(vcpu, (IA64_PSR)old_vpsr, (IA64_PSR)new_vpsr);
+    switch_mm_mode_fast(vcpu, (IA64_PSR)old_vpsr, (IA64_PSR)new_vpsr);
 }
 
 void vmx_vcpu_rsm_fast(VCPU *vcpu, u64 imm24)
@@ -394,7 +394,7 @@
 
     ia64_setreg(_IA64_REG_CR_IPSR, mipsr);
 
-    switch_mm_mode(vcpu, (IA64_PSR)old_vpsr, (IA64_PSR)new_vpsr);
+    switch_mm_mode_fast(vcpu, (IA64_PSR)old_vpsr, (IA64_PSR)new_vpsr);
 }
 
 IA64FAULT vmx_vcpu_rfi(VCPU *vcpu)
diff -r 2ce591cbc564 xen/include/asm-ia64/vmx_phy_mode.h
--- a/xen/include/asm-ia64/vmx_phy_mode.h       Wed May 14 12:31:37 2008 +0900
+++ b/xen/include/asm-ia64/vmx_phy_mode.h       Wed May 14 14:15:36 2008 +0900
@@ -76,9 +76,8 @@
 #define PHY_PAGE_WB (_PAGE_A|_PAGE_D|_PAGE_P|_PAGE_MA_WB|_PAGE_AR_RWX)
 
 extern void physical_mode_init(VCPU *);
-extern void switch_to_physical_rid(VCPU *);
-extern void switch_to_virtual_rid(VCPU *vcpu);
 extern void switch_mm_mode(VCPU *vcpu, IA64_PSR old_psr, IA64_PSR new_psr);
+extern void switch_mm_mode_fast(VCPU *vcpu, IA64_PSR old_psr, IA64_PSR 
new_psr);
 extern void check_mm_mode_switch (VCPU *vcpu,  IA64_PSR old_psr, IA64_PSR 
new_psr);
 extern void prepare_if_physical_mode(VCPU *vcpu);
 extern void recover_if_physical_mode(VCPU *vcpu);


On Mon, May 12, 2008 at 04:12:28PM +0900, Isaku Yamahata wrote:
> On Mon, May 12, 2008 at 11:27:29AM +0800, Xu, Anthony wrote:
> > Hi isaku,
> > 
> > I can't reprocude it.
> > I had booted guest rhel4-2 and windows server 2003 SP1 with intel &
> > opensource Guest FW.
> > 
> > What's your FW version and guest OS version?
> 
> GFW: open gfw of change set 124:ececa5a5473b (prebuild one)
> 
> kernel: 
> - Linux version 2.6.9-22.EL (bhcompile@xxxxxxxxxxxxxxxxxxxxxx) (gcc
>   version 3.4.4 20050721 (Red Hat 3.4.4-2)) #1 SMP Mon Sep 19 17:54:55
>   EDT 2005
> - Linux 2.6.18 + xen (My local compile)
> - Windows 2003
> 

-- 
yamahata

_______________________________________________
Xen-ia64-devel mailing list
Xen-ia64-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-ia64-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.