
[Xen-changelog] [xen-unstable] [IA64] optimize vpsr



# HG changeset patch
# User awilliam@xxxxxxxxxxx
# Node ID 4834d1e8f26ef9eb85abb3c482b12303354d2c34
# Parent  000789c36d289225fcce7ecee290ae7b3d808058
[IA64] optimize vpsr

vpsr can't keep track of the following bits of the guest psr:
be, up, ac, mfl, mfh, cpl, ri.
Previously, every time Xen got control it synced vpsr with
cr.ipsr, which is unnecessary. Xen now syncs with cr.ipsr
only when those bits are actually needed.

Signed-off-by: Anthony Xu <anthony.xu@xxxxxxxxx>
---
 xen/arch/ia64/vmx/vlsapic.c       |    6 ++--
 xen/arch/ia64/vmx/vmmu.c          |    4 +-
 xen/arch/ia64/vmx/vmx_interrupt.c |    2 -
 xen/arch/ia64/vmx/vmx_minstate.h  |   24 +++--------------
 xen/arch/ia64/vmx/vmx_phy_mode.c  |    4 --
 xen/arch/ia64/vmx/vmx_process.c   |    6 ++--
 xen/arch/ia64/vmx/vmx_utility.c   |    2 -
 xen/arch/ia64/vmx/vmx_vcpu.c      |   52 ++++++++++++++++++++------------------
 xen/arch/ia64/vmx/vmx_virt.c      |   25 +++++-------------
 9 files changed, 52 insertions(+), 73 deletions(-)
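
For reference, the heart of this change is the mask merge in the new
vmx_vcpu_get_psr() (see the vmx_vcpu.c hunk below): bits that the software
vpsr still tracks are taken from vpsr, while the seven untracked bits are
read out of the saved cr.ipsr on demand. The following minimal, standalone
C sketch shows that merge; the PSR bit positions in it are illustrative
placeholders, not the real IA64 layout (in Xen the IA64_PSR_* masks come
from asm/kregs.h).

/*
 * Minimal standalone sketch of the vpsr/cr.ipsr merge performed by the
 * new vmx_vcpu_get_psr().  Bit positions are placeholders only.
 */
#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

#define PSR_BE   (UINT64_C(1) << 1)    /* placeholder positions */
#define PSR_UP   (UINT64_C(1) << 2)
#define PSR_AC   (UINT64_C(1) << 3)
#define PSR_MFL  (UINT64_C(1) << 4)
#define PSR_MFH  (UINT64_C(1) << 5)
#define PSR_CPL  (UINT64_C(3) << 32)
#define PSR_RI   (UINT64_C(3) << 41)

/* Bits the software vpsr no longer tracks. */
static const uint64_t untracked = PSR_BE | PSR_UP | PSR_AC | PSR_MFL |
                                  PSR_MFH | PSR_CPL | PSR_RI;

/* Tracked bits come from vpsr; untracked ones from the saved cr.ipsr. */
static uint64_t get_guest_psr(uint64_t vpsr, uint64_t cr_ipsr)
{
    return (vpsr & ~untracked) | (cr_ipsr & untracked);
}

int main(void)
{
    uint64_t vpsr    = UINT64_C(0x2000);     /* e.g. a tracked bit set */
    uint64_t cr_ipsr = PSR_CPL | PSR_RI;     /* cpl/ri live in cr.ipsr */
    printf("guest psr = 0x%" PRIx64 "\n", get_guest_psr(vpsr, cr_ipsr));
    return 0;
}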

diff -r 000789c36d28 -r 4834d1e8f26e xen/arch/ia64/vmx/vlsapic.c
--- a/xen/arch/ia64/vmx/vlsapic.c       Wed Jul 12 13:26:09 2006 -0600
+++ b/xen/arch/ia64/vmx/vlsapic.c       Fri Jul 14 11:05:40 2006 -0600
@@ -570,7 +570,7 @@ int vmx_check_pending_irq(VCPU *vcpu)
     }
     h_inservice = highest_inservice_irq(vcpu);
 
-    vpsr.val = vmx_vcpu_get_psr(vcpu);
+    vpsr.val = VCPU(vcpu, vpsr);
     mask = irq_masked(vcpu, h_pending, h_inservice);
     if (  vpsr.i && IRQ_NO_MASKED == mask ) {
         isr = vpsr.val & IA64_PSR_RI;
@@ -654,7 +654,7 @@ static void generate_exirq(VCPU *vcpu)
     IA64_PSR    vpsr;
     uint64_t    isr;
     REGS *regs=vcpu_regs(vcpu);
-    vpsr.val = vmx_vcpu_get_psr(vcpu);
+    vpsr.val = VCPU(vcpu, vpsr);
     update_vhpi(vcpu, NULL_VECTOR);
     isr = vpsr.val & IA64_PSR_RI;
     if ( !vpsr.ic )
@@ -668,7 +668,7 @@ void vhpi_detection(VCPU *vcpu)
     tpr_t       vtpr;
     IA64_PSR    vpsr;
     
-    vpsr.val = vmx_vcpu_get_psr(vcpu);
+    vpsr.val = VCPU(vcpu, vpsr);
     vtpr.val = VCPU(vcpu, tpr);
 
     threshold = ((!vpsr.i) << 5) | (vtpr.mmi << 4) | vtpr.mic;
diff -r 000789c36d28 -r 4834d1e8f26e xen/arch/ia64/vmx/vmmu.c
--- a/xen/arch/ia64/vmx/vmmu.c  Wed Jul 12 13:26:09 2006 -0600
+++ b/xen/arch/ia64/vmx/vmmu.c  Fri Jul 14 11:05:40 2006 -0600
@@ -268,7 +268,7 @@ int vhpt_enabled(VCPU *vcpu, uint64_t va
     PTA   vpta;
     IA64_PSR  vpsr; 
 
-    vpsr.val = vmx_vcpu_get_psr(vcpu);
+    vpsr.val = VCPU(vcpu, vpsr);
     vcpu_get_rr(vcpu, vadr, &vrr.rrval);
     vmx_vcpu_get_pta(vcpu,&vpta.val);
 
@@ -622,7 +622,7 @@ IA64FAULT vmx_vcpu_tpa(VCPU *vcpu, UINT6
     visr.val=0;
     visr.ei=pt_isr.ei;
     visr.ir=pt_isr.ir;
-    vpsr.val = vmx_vcpu_get_psr(vcpu);
+    vpsr.val = VCPU(vcpu, vpsr);
     if(vpsr.ic==0){
         visr.ni=1;
     }
diff -r 000789c36d28 -r 4834d1e8f26e xen/arch/ia64/vmx/vmx_interrupt.c
--- a/xen/arch/ia64/vmx/vmx_interrupt.c Wed Jul 12 13:26:09 2006 -0600
+++ b/xen/arch/ia64/vmx/vmx_interrupt.c Fri Jul 14 11:05:40 2006 -0600
@@ -117,7 +117,7 @@ set_ifa_itir_iha (VCPU *vcpu, u64 vadr,
 {
     IA64_PSR vpsr;
     u64 value;
-    vpsr.val = vmx_vcpu_get_psr(vcpu);
+    vpsr.val = VCPU(vcpu, vpsr);
     /* Vol2, Table 8-1 */
     if ( vpsr.ic ) {
         if ( set_ifa){
diff -r 000789c36d28 -r 4834d1e8f26e xen/arch/ia64/vmx/vmx_minstate.h
--- a/xen/arch/ia64/vmx/vmx_minstate.h  Wed Jul 12 13:26:09 2006 -0600
+++ b/xen/arch/ia64/vmx/vmx_minstate.h  Fri Jul 14 11:05:40 2006 -0600
@@ -57,8 +57,8 @@
     ;;
 
 
-#define PAL_VSA_SYNC_READ_CLEANUP_PSR_PL           \
-    /* begin to call pal vps sync_read and cleanup psr.pl */     \
+#define PAL_VSA_SYNC_READ           \
+    /* begin to call pal vps sync_read */     \
     add r25=IA64_VPD_BASE_OFFSET, r21;       \
     movl r20=__vsa_base;     \
     ;;          \
@@ -68,31 +68,17 @@
     add r20=PAL_VPS_SYNC_READ,r20;  \
     ;;  \
 { .mii;  \
-    add r22=VPD(VPSR),r25;   \
+    nop 0x0;   \
     mov r24=ip;        \
     mov b0=r20;     \
     ;;      \
 };           \
 { .mmb;      \
     add r24 = 0x20, r24;    \
-    mov r16 = cr.ipsr;  /* Temp workaround since psr.ic is off */ \
+    nop 0x0;            \
     br.cond.sptk b0;        /*  call the service */ \
     ;;              \
 };           \
-    ld8 r17=[r22];   \
-    /* deposite ipsr bit cpl into vpd.vpsr, since epc will change */    \
-    extr.u r30=r16, IA64_PSR_CPL0_BIT, 2;   \
-    ;;      \
-    dep r17=r30, r17, IA64_PSR_CPL0_BIT, 2;   \
-    extr.u r30=r16, IA64_PSR_BE_BIT, 5;   \
-    ;;      \
-    dep r17=r30, r17, IA64_PSR_BE_BIT, 5;   \
-    extr.u r30=r16, IA64_PSR_RI_BIT, 2;   \
-    ;;      \
-    dep r17=r30, r17, IA64_PSR_RI_BIT, 2;   \
-    ;;      \
-    st8 [r22]=r17;      \
-    ;;
 
 
 
@@ -219,7 +205,7 @@
     movl r11=FPSR_DEFAULT;   /* L-unit */                           \
     movl r1=__gp;       /* establish kernel global pointer */               \
     ;;                                          \
-    PAL_VSA_SYNC_READ_CLEANUP_PSR_PL           \
+    PAL_VSA_SYNC_READ           \
     VMX_MINSTATE_END_SAVE_MIN
 
 /*
diff -r 000789c36d28 -r 4834d1e8f26e xen/arch/ia64/vmx/vmx_phy_mode.c
--- a/xen/arch/ia64/vmx/vmx_phy_mode.c  Wed Jul 12 13:26:09 2006 -0600
+++ b/xen/arch/ia64/vmx/vmx_phy_mode.c  Fri Jul 14 11:05:40 2006 -0600
@@ -110,10 +110,8 @@ physical_tlb_miss(VCPU *vcpu, u64 vadr)
 physical_tlb_miss(VCPU *vcpu, u64 vadr)
 {
     u64 pte;
-    IA64_PSR vpsr;
-    vpsr.val=vmx_vcpu_get_psr(vcpu);
     pte =  vadr& _PAGE_PPN_MASK;
-    pte = pte|(vpsr.cpl<<7)|PHY_PAGE_WB;
+    pte = pte | PHY_PAGE_WB;
     thash_purge_and_insert(vcpu, pte, (PAGE_SHIFT<<2), vadr);
     return;
 }
diff -r 000789c36d28 -r 4834d1e8f26e xen/arch/ia64/vmx/vmx_process.c
--- a/xen/arch/ia64/vmx/vmx_process.c   Wed Jul 12 13:26:09 2006 -0600
+++ b/xen/arch/ia64/vmx/vmx_process.c   Fri Jul 14 11:05:40 2006 -0600
@@ -82,7 +82,7 @@ void vmx_reflect_interruption(UINT64 ifa
      UINT64 vector,REGS *regs)
 {
     VCPU *vcpu = current;
-    UINT64 vpsr = vmx_vcpu_get_psr(vcpu);
+    UINT64 vpsr = VCPU(vcpu, vpsr);
     vector=vec2off[vector];
     if(!(vpsr&IA64_PSR_IC)&&(vector!=IA64_DATA_NESTED_TLB_VECTOR)){
         panic_domain(regs, "Guest nested fault vector=%lx!\n", vector);
@@ -156,7 +156,7 @@ void save_banked_regs_to_vpd(VCPU *v, RE
     IA64_PSR vpsr;
     src=&regs->r16;
     sunat=&regs->eml_unat;
-    vpsr.val = vmx_vcpu_get_psr(v);
+    vpsr.val = VCPU(v, vpsr);
     if(vpsr.bn){
         dst = &VCPU(v, vgr[0]);
         dunat =&VCPU(v, vnat);
@@ -253,7 +253,7 @@ vmx_hpw_miss(u64 vadr , u64 vec, REGS* r
     check_vtlb_sanity(vtlb);
     dump_vtlb(vtlb);
 #endif
-    vpsr.val = vmx_vcpu_get_psr(v);
+    vpsr.val = VCPU(v, vpsr);
     misr.val=VMX(v,cr_isr);
 
     if(is_physical_mode(v)&&(!(vadr<<1>>62))){
diff -r 000789c36d28 -r 4834d1e8f26e xen/arch/ia64/vmx/vmx_utility.c
--- a/xen/arch/ia64/vmx/vmx_utility.c   Wed Jul 12 13:26:09 2006 -0600
+++ b/xen/arch/ia64/vmx/vmx_utility.c   Fri Jul 14 11:05:40 2006 -0600
@@ -381,7 +381,7 @@ set_isr_ei_ni (VCPU *vcpu)
 
     visr.val = 0;
 
-    vpsr.val = vmx_vcpu_get_psr (vcpu);
+    vpsr.val = VCPU(vcpu, vpsr);
 
     if (!vpsr.ic == 1 ) {
         /* Set ISR.ni */
diff -r 000789c36d28 -r 4834d1e8f26e xen/arch/ia64/vmx/vmx_vcpu.c
--- a/xen/arch/ia64/vmx/vmx_vcpu.c      Wed Jul 12 13:26:09 2006 -0600
+++ b/xen/arch/ia64/vmx/vmx_vcpu.c      Fri Jul 14 11:05:40 2006 -0600
@@ -67,6 +67,8 @@
 #include <asm/vmx_pal_vsa.h>
 #include <asm/kregs.h>
 //unsigned long last_guest_rsm = 0x0;
+
+#ifdef VTI_DEBUG
 struct guest_psr_bundle{
     unsigned long ip;
     unsigned long psr;
@@ -74,6 +76,7 @@ struct guest_psr_bundle{
 
 struct guest_psr_bundle guest_psr_buf[100];
 unsigned long guest_psr_index = 0;
+#endif
 
 void
 vmx_vcpu_set_psr(VCPU *vcpu, unsigned long value)
@@ -82,7 +85,7 @@ vmx_vcpu_set_psr(VCPU *vcpu, unsigned lo
     UINT64 mask;
     REGS *regs;
     IA64_PSR old_psr, new_psr;
-    old_psr.val=vmx_vcpu_get_psr(vcpu);
+    old_psr.val=VCPU(vcpu, vpsr);
 
     regs=vcpu_regs(vcpu);
     /* We only support guest as:
@@ -108,7 +111,8 @@ vmx_vcpu_set_psr(VCPU *vcpu, unsigned lo
         // vpsr.i 0->1
         vcpu->arch.irq_new_condition = 1;
     }
-    new_psr.val=vmx_vcpu_get_psr(vcpu);
+    new_psr.val=VCPU(vcpu, vpsr);
+#ifdef VTI_DEBUG    
     {
     struct pt_regs *regs = vcpu_regs(vcpu);
     guest_psr_buf[guest_psr_index].ip = regs->cr_iip;
@@ -116,6 +120,7 @@ vmx_vcpu_set_psr(VCPU *vcpu, unsigned lo
     if (++guest_psr_index >= 100)
         guest_psr_index = 0;
     }
+#endif    
 #if 0
     if (old_psr.i != new_psr.i) {
     if (old_psr.i)
@@ -149,24 +154,14 @@ IA64FAULT vmx_vcpu_increment_iip(VCPU *v
 {
     // TODO: trap_bounce?? Eddie
     REGS *regs = vcpu_regs(vcpu);
-    IA64_PSR vpsr;
     IA64_PSR *ipsr = (IA64_PSR *)&regs->cr_ipsr;
 
-    vpsr.val = vmx_vcpu_get_psr(vcpu);
-    if (vpsr.ri == 2) {
-    vpsr.ri = 0;
-    regs->cr_iip += 16;
+    if (ipsr->ri == 2) {
+        ipsr->ri = 0;
+        regs->cr_iip += 16;
     } else {
-    vpsr.ri++;
-    }
-
-    ipsr->ri = vpsr.ri;
-    vpsr.val &=
-            (~ (IA64_PSR_ID |IA64_PSR_DA | IA64_PSR_DD |
-                IA64_PSR_SS | IA64_PSR_ED | IA64_PSR_IA
-            ));
-
-    VCPU(vcpu, vpsr) = vpsr.val;
+        ipsr->ri++;
+    }
 
     ipsr->val &=
             (~ (IA64_PSR_ID |IA64_PSR_DA | IA64_PSR_DD |
@@ -181,7 +176,7 @@ IA64FAULT vmx_vcpu_cover(VCPU *vcpu)
 {
     REGS *regs = vcpu_regs(vcpu);
     IA64_PSR vpsr;
-    vpsr.val = vmx_vcpu_get_psr(vcpu);
+    vpsr.val = VCPU(vcpu, vpsr);
 
     if(!vpsr.ic)
         VCPU(vcpu,ifs) = regs->cr_ifs;
@@ -287,12 +282,6 @@ IA64FAULT vmx_vcpu_rfi(VCPU *vcpu)
 }
 
 
-UINT64
-vmx_vcpu_get_psr(VCPU *vcpu)
-{
-    return VCPU(vcpu,vpsr);
-}
-
 #if 0
 IA64FAULT
 vmx_vcpu_get_bgr(VCPU *vcpu, unsigned int reg, UINT64 *val)
@@ -390,6 +379,20 @@ vmx_vcpu_set_gr(VCPU *vcpu, unsigned reg
 
 #endif
 
+/*
+    VPSR can't keep track of the PSR bits masked below.
+    This function returns the complete guest PSR.
+ */
+
+UINT64 vmx_vcpu_get_psr(VCPU *vcpu)
+{
+    UINT64 mask;
+    REGS *regs = vcpu_regs(vcpu);
+    mask = IA64_PSR_BE | IA64_PSR_UP | IA64_PSR_AC | IA64_PSR_MFL |
+           IA64_PSR_MFH | IA64_PSR_CPL | IA64_PSR_RI;
+    return (VCPU(vcpu, vpsr) & ~mask) | (regs->cr_ipsr & mask);
+}
+
 IA64FAULT vmx_vcpu_reset_psr_sm(VCPU *vcpu, UINT64 imm24)
 {
     UINT64 vpsr;
@@ -412,6 +415,7 @@ IA64FAULT vmx_vcpu_set_psr_sm(VCPU *vcpu
 
 IA64FAULT vmx_vcpu_set_psr_l(VCPU *vcpu, UINT64 val)
 {
+    val = (val & MASK(0, 32)) | (vmx_vcpu_get_psr(vcpu) & MASK(32, 32));
     vmx_vcpu_set_psr(vcpu, val);
     return IA64_NO_FAULT;
 }
diff -r 000789c36d28 -r 4834d1e8f26e xen/arch/ia64/vmx/vmx_virt.c
--- a/xen/arch/ia64/vmx/vmx_virt.c      Wed Jul 12 13:26:09 2006 -0600
+++ b/xen/arch/ia64/vmx/vmx_virt.c      Fri Jul 14 11:05:40 2006 -0600
@@ -154,7 +154,6 @@ IA64FAULT vmx_emul_ssm(VCPU *vcpu, INST6
     return vmx_vcpu_set_psr_sm(vcpu,imm24);
 }
 
-unsigned long last_guest_psr = 0x0;
 IA64FAULT vmx_emul_mov_from_psr(VCPU *vcpu, INST64 inst)
 {
     UINT64 tgt = inst.M33.r1;
@@ -167,7 +166,6 @@ IA64FAULT vmx_emul_mov_from_psr(VCPU *vc
     */
     val = vmx_vcpu_get_psr(vcpu);
     val = (val & MASK(0, 32)) | (val & MASK(35, 2));
-    last_guest_psr = val;
     return vcpu_set_gr(vcpu, tgt, val, 0);
 }
 
@@ -181,14 +179,7 @@ IA64FAULT vmx_emul_mov_to_psr(VCPU *vcpu
     if(vcpu_get_gr_nat(vcpu, inst.M35.r2, &val) != IA64_NO_FAULT)
        panic_domain(vcpu_regs(vcpu),"get_psr nat bit fault\n");
 
-       val = (val & MASK(0, 32)) | (VCPU(vcpu, vpsr) & MASK(32, 32));
-#if 0
-       if (last_mov_from_psr && (last_guest_psr != (val & MASK(0,32))))
-               while(1);
-       else
-               last_mov_from_psr = 0;
-#endif
-        return vmx_vcpu_set_psr_l(vcpu,val);
+    return vmx_vcpu_set_psr_l(vcpu, val);
 }
 
 
@@ -256,6 +247,7 @@ IA64FAULT vmx_emul_ptc_l(VCPU *vcpu, INS
 IA64FAULT vmx_emul_ptc_l(VCPU *vcpu, INST64 inst)
 {
     u64 r2,r3;
+#ifdef  VMAL_NO_FAULT_CHECK
     IA64_PSR  vpsr;
 
     vpsr.val=vmx_vcpu_get_psr(vcpu);
@@ -265,6 +257,7 @@ IA64FAULT vmx_emul_ptc_l(VCPU *vcpu, INS
         privilege_op (vcpu);
         return IA64_FAULT;
     }
+#endif // VMAL_NO_FAULT_CHECK
     
if(vcpu_get_gr_nat(vcpu,inst.M45.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M45.r2,&r2)){
 #ifdef  VMAL_NO_FAULT_CHECK
         ISR isr;
@@ -288,10 +281,10 @@ IA64FAULT vmx_emul_ptc_e(VCPU *vcpu, INS
 IA64FAULT vmx_emul_ptc_e(VCPU *vcpu, INST64 inst)
 {
     u64 r3;
+#ifdef  VMAL_NO_FAULT_CHECK
     IA64_PSR  vpsr;
 
     vpsr.val=vmx_vcpu_get_psr(vcpu);
-#ifdef  VMAL_NO_FAULT_CHECK
     ISR isr;
     if ( vpsr.cpl != 0) {
         /* Inject Privileged Operation fault into guest */
@@ -574,6 +567,7 @@ IA64FAULT vmx_emul_itr_d(VCPU *vcpu, INS
 IA64FAULT vmx_emul_itr_d(VCPU *vcpu, INST64 inst)
 {
     UINT64 itir, ifa, pte, slot;
+#ifdef  VMAL_NO_FAULT_CHECK
     IA64_PSR  vpsr;
     vpsr.val=vmx_vcpu_get_psr(vcpu);
     if ( vpsr.ic ) {
@@ -581,7 +575,6 @@ IA64FAULT vmx_emul_itr_d(VCPU *vcpu, INS
         illegal_op(vcpu);
         return IA64_FAULT;
     }
-#ifdef  VMAL_NO_FAULT_CHECK
     ISR isr;
     if ( vpsr.cpl != 0) {
         /* Inject Privileged Operation fault into guest */
@@ -633,7 +626,6 @@ IA64FAULT vmx_emul_itr_i(VCPU *vcpu, INS
     UINT64 itir, ifa, pte, slot;
 #ifdef  VMAL_NO_FAULT_CHECK
     ISR isr;
-#endif
     IA64_PSR  vpsr;
     vpsr.val=vmx_vcpu_get_psr(vcpu);
     if ( vpsr.ic ) {
@@ -641,7 +633,6 @@ IA64FAULT vmx_emul_itr_i(VCPU *vcpu, INS
         illegal_op(vcpu);
         return IA64_FAULT;
     }
-#ifdef  VMAL_NO_FAULT_CHECK
     if ( vpsr.cpl != 0) {
         /* Inject Privileged Operation fault into guest */
         set_privileged_operation_isr (vcpu, 0);
@@ -689,9 +680,10 @@ IA64FAULT vmx_emul_itr_i(VCPU *vcpu, INS
 
 IA64FAULT itc_fault_check(VCPU *vcpu, INST64 inst, u64 *itir, u64 *ifa,u64 *pte)
 {
+    IA64FAULT  ret1;
+
+#ifdef  VMAL_NO_FAULT_CHECK
     IA64_PSR  vpsr;
-    IA64FAULT  ret1;
-
     vpsr.val=vmx_vcpu_get_psr(vcpu);
     if ( vpsr.ic ) {
         set_illegal_op_isr(vcpu);
@@ -699,7 +691,6 @@ IA64FAULT itc_fault_check(VCPU *vcpu, IN
         return IA64_FAULT;
     }
 
-#ifdef  VMAL_NO_FAULT_CHECK
     UINT64 fault;
     ISR isr;
     if ( vpsr.cpl != 0) {

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog