[Xen-changelog] [xen-unstable] [IA64] cleanup vmx_virt.c



# HG changeset patch
# User Alex Williamson <alex.williamson@xxxxxx>
# Date 1190048669 21600
# Node ID 0f16d41ebb0bdf94072b109815aba7f6210f0f0f
# Parent  487df63c4ae9235163f041fbf2a1d8af657630a9
[IA64] cleanup vmx_virt.c

Signed-off-by: Tristan Gingold <tgingold@xxxxxxx>
---
 xen/arch/ia64/vmx/vmx_virt.c |  339 +++++++++++++++++++++----------------------
 1 files changed, 173 insertions(+), 166 deletions(-)
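
This changeset is a pure coding-style cleanup of xen/arch/ia64/vmx/vmx_virt.c:
no functional change, only re-indentation, brace placement, and whitespace
fixes. As a minimal before/after sketch of the conventions applied (drawn from
the hunks below; vpsr, set_privileged_operation_isr() and privilege_op() are
names that appear in the file itself):

    /* Before: inconsistent indentation, space inside the condition,
     * stray space before the call parenthesis, misaligned brace */
    if ( vpsr.cpl != 0) {
        set_privileged_operation_isr (vcpu, 0);
        privilege_op (vcpu);
        return IA64_FAULT;
   }

    /* After: 4-space indent, no space inside the condition,
     * calls written without a space before the parenthesis */
    if (vpsr.cpl != 0) {
        set_privileged_operation_isr(vcpu, 0);
        privilege_op(vcpu);
        return IA64_FAULT;
    }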

diff -r 487df63c4ae9 -r 0f16d41ebb0b xen/arch/ia64/vmx/vmx_virt.c
--- a/xen/arch/ia64/vmx/vmx_virt.c      Mon Sep 17 10:59:27 2007 -0600
+++ b/xen/arch/ia64/vmx/vmx_virt.c      Mon Sep 17 11:04:29 2007 -0600
@@ -202,6 +202,7 @@ static IA64FAULT vmx_emul_rfi(VCPU *vcpu
         return IA64_FAULT;
     }
 #endif // CHECK_FAULT
+
     regs=vcpu_regs(vcpu);
     vpsr.val=regs->cr_ipsr;
     if ( vpsr.is == 1 ) {
@@ -275,8 +276,9 @@ static IA64FAULT vmx_emul_ptc_l(VCPU *vc
         vcpu_set_isr(vcpu, isr.val);
         unimpl_daddr(vcpu);
         return IA64_FAULT;
-   }
-#endif // VMAL_NO_FAULT_CHECK
+    }
+#endif // VMAL_NO_FAULT_CHECK
+
     return vmx_vcpu_ptc_l(vcpu,r3,bits(r2,2,7));
 }
 
@@ -333,8 +335,9 @@ static IA64FAULT vmx_emul_ptc_g(VCPU *vc
         vcpu_set_isr(vcpu, isr.val);
         unimpl_daddr(vcpu);
         return IA64_FAULT;
-   }
-#endif // VMAL_NO_FAULT_CHECK
+    }
+#endif // VMAL_NO_FAULT_CHECK
+
     return vmx_vcpu_ptc_g(vcpu,r3,bits(r2,2,7));
 }
 
@@ -366,8 +369,9 @@ static IA64FAULT vmx_emul_ptc_ga(VCPU *v
         vcpu_set_isr(vcpu, isr.val);
         unimpl_daddr(vcpu);
         return IA64_FAULT;
-   }
-#endif // VMAL_NO_FAULT_CHECK
+    }
+#endif // VMAL_NO_FAULT_CHECK
+
     return vmx_vcpu_ptc_ga(vcpu,r3,bits(r2,2,7));
 }
 
@@ -568,40 +572,43 @@ static IA64FAULT vmx_emul_itr_d(VCPU *vc
 {
     u64 itir, ifa, pte, slot;
     ISR isr;
+
 #ifdef  VMAL_NO_FAULT_CHECK
     IA64_PSR  vpsr;
-    vpsr.val=vmx_vcpu_get_psr(vcpu);
-    if ( vpsr.ic ) {
-        set_illegal_op_isr(vcpu);
-        illegal_op(vcpu);
-        return IA64_FAULT;
-    }
-    if ( vpsr.cpl != 0) {
-        /* Inject Privileged Operation fault into guest */
-        set_privileged_operation_isr (vcpu, 0);
-        privilege_op (vcpu);
-        return IA64_FAULT;
-    }
-#endif // VMAL_NO_FAULT_CHECK
-    
-if(vcpu_get_gr_nat(vcpu,inst.M45.r3,&slot)||vcpu_get_gr_nat(vcpu,inst.M45.r2,&pte)){
-#ifdef  VMAL_NO_FAULT_CHECK
-        set_isr_reg_nat_consumption(vcpu,0,0);
-        rnat_comsumption(vcpu);
-        return IA64_FAULT;
-#endif // VMAL_NO_FAULT_CHECK
-    }
-#ifdef  VMAL_NO_FAULT_CHECK
-    if(is_reserved_rr_register(vcpu, slot)){
-        set_illegal_op_isr(vcpu);
-        illegal_op(vcpu);
-        return IA64_FAULT;
-    }
-#endif // VMAL_NO_FAULT_CHECK
-
-    if (vcpu_get_itir(vcpu,&itir)){
+
+    vpsr.val = vmx_vcpu_get_psr(vcpu);
+    if (vpsr.ic) {
+        set_illegal_op_isr(vcpu);
+        illegal_op(vcpu);
+        return IA64_FAULT;
+    }
+    if (vpsr.cpl != 0) {
+        /* Inject Privileged Operation fault into guest */
+        set_privileged_operation_isr(vcpu, 0);
+        privilege_op (vcpu);
+        return IA64_FAULT;
+    }
+#endif // VMAL_NO_FAULT_CHECK
+    if (vcpu_get_gr_nat(vcpu, inst.M45.r3, &slot)
+        || vcpu_get_gr_nat(vcpu, inst.M45.r2, &pte)) {
+#ifdef  VMAL_NO_FAULT_CHECK
+        set_isr_reg_nat_consumption(vcpu, 0, 0);
+        rnat_comsumption(vcpu);
+        return IA64_FAULT;
+#endif // VMAL_NO_FAULT_CHECK
+    }
+#ifdef  VMAL_NO_FAULT_CHECK
+    if (is_reserved_rr_register(vcpu, slot)) {
+        set_illegal_op_isr(vcpu);
+        illegal_op(vcpu);
+        return IA64_FAULT;
+    }
+#endif // VMAL_NO_FAULT_CHECK
+
+    if (vcpu_get_itir(vcpu, &itir)) {
         return(IA64_FAULT);
     }
-    if (vcpu_get_ifa(vcpu,&ifa)){
+    if (vcpu_get_ifa(vcpu, &ifa)) {
         return(IA64_FAULT);
     }
 #ifdef  VMAL_NO_FAULT_CHECK
@@ -609,139 +616,140 @@ static IA64FAULT vmx_emul_itr_d(VCPU *vc
        // TODO
        return IA64_FAULT;
     }
+    if (unimplemented_gva(vcpu, ifa)) {
+        isr.val = set_isr_ei_ni(vcpu);
+        isr.code = IA64_RESERVED_REG_FAULT;
+        vcpu_set_isr(vcpu, isr.val);
+        unimpl_daddr(vcpu);
+        return IA64_FAULT;
+    }
+#endif // VMAL_NO_FAULT_CHECK
+
+    if (slot >= NDTRS) {
+        isr.val = set_isr_ei_ni(vcpu);
+        isr.code = IA64_RESERVED_REG_FAULT;
+        vcpu_set_isr(vcpu, isr.val);
+        rsv_reg_field(vcpu);
+        return IA64_FAULT;
+    }
+
+    return (vmx_vcpu_itr_d(vcpu, slot, pte, itir, ifa));
+}
+
+static IA64FAULT vmx_emul_itr_i(VCPU *vcpu, INST64 inst)
+{
+    u64 itir, ifa, pte, slot;
+    ISR isr;
+#ifdef  VMAL_NO_FAULT_CHECK
+    IA64_PSR  vpsr;
+    vpsr.val = vmx_vcpu_get_psr(vcpu);
+    if (vpsr.ic) {
+        set_illegal_op_isr(vcpu);
+        illegal_op(vcpu);
+        return IA64_FAULT;
+    }
+    if (vpsr.cpl != 0) {
+        /* Inject Privileged Operation fault into guest */
+        set_privileged_operation_isr(vcpu, 0);
+        privilege_op(vcpu);
+        return IA64_FAULT;
+    }
+#endif // VMAL_NO_FAULT_CHECK
+    if (vcpu_get_gr_nat(vcpu, inst.M45.r3, &slot)
+        || vcpu_get_gr_nat(vcpu, inst.M45.r2, &pte)) {
+#ifdef  VMAL_NO_FAULT_CHECK
+        set_isr_reg_nat_consumption(vcpu, 0, 0);
+        rnat_comsumption(vcpu);
+        return IA64_FAULT;
+#endif // VMAL_NO_FAULT_CHECK
+    }
+#ifdef  VMAL_NO_FAULT_CHECK
+    if (is_reserved_rr_register(vcpu, slot)) {
+        set_illegal_op_isr(vcpu);
+        illegal_op(vcpu);
+        return IA64_FAULT;
+    }
+#endif // VMAL_NO_FAULT_CHECK
+
+    if (vcpu_get_itir(vcpu, &itir)) {
+        return IA64_FAULT;
+    }
+    if (vcpu_get_ifa(vcpu, &ifa)) {
+        return IA64_FAULT;
+    }
+#ifdef  VMAL_NO_FAULT_CHECK
+    if (is_reserved_itir_field(vcpu, itir)) {
+       // TODO
+       return IA64_FAULT;
+    }
+    if (unimplemented_gva(vcpu, ifa)) {
+        isr.val = set_isr_ei_ni(vcpu);
+        isr.code = IA64_RESERVED_REG_FAULT;
+        vcpu_set_isr(vcpu, isr.val);
+        unimpl_daddr(vcpu);
+        return IA64_FAULT;
+    }
+#endif // VMAL_NO_FAULT_CHECK
+
+    if (slot >= NITRS) {
+        isr.val = set_isr_ei_ni(vcpu);
+        isr.code = IA64_RESERVED_REG_FAULT;
+        vcpu_set_isr(vcpu, isr.val);
+        rsv_reg_field(vcpu);
+        return IA64_FAULT;
+    }
+
+    return vmx_vcpu_itr_i(vcpu, slot, pte, itir, ifa);
+}
+
+static IA64FAULT itc_fault_check(VCPU *vcpu, INST64 inst,
+                                 u64 *itir, u64 *ifa, u64 *pte)
+{
+    IA64FAULT  ret1;
+
+#ifdef  VMAL_NO_FAULT_CHECK
+    IA64_PSR  vpsr;
+    vpsr.val = vmx_vcpu_get_psr(vcpu);
+    if (vpsr.ic) {
+        set_illegal_op_isr(vcpu);
+        illegal_op(vcpu);
+        return IA64_FAULT;
+    }
+
+    u64 fault;
+    ISR isr;
+    if (vpsr.cpl != 0) {
+        /* Inject Privileged Operation fault into guest */
+        set_privileged_operation_isr(vcpu, 0);
+        privilege_op(vcpu);
+        return IA64_FAULT;
+    }
+#endif // VMAL_NO_FAULT_CHECK
+    ret1 = vcpu_get_gr_nat(vcpu, inst.M45.r2, pte);
+#ifdef  VMAL_NO_FAULT_CHECK
+    if (ret1 != IA64_NO_FAULT) {
+        set_isr_reg_nat_consumption(vcpu, 0, 0);
+        rnat_comsumption(vcpu);
+        return IA64_FAULT;
+    }
+#endif // VMAL_NO_FAULT_CHECK
+
+    if (vcpu_get_itir(vcpu, itir)) {
+        return IA64_FAULT;
+    }
+    if (vcpu_get_ifa(vcpu, ifa)) {
+        return IA64_FAULT;
+    }
+#ifdef  VMAL_NO_FAULT_CHECK
     if (unimplemented_gva(vcpu,ifa) ) {
         isr.val = set_isr_ei_ni(vcpu);
         isr.code = IA64_RESERVED_REG_FAULT;
         vcpu_set_isr(vcpu, isr.val);
         unimpl_daddr(vcpu);
         return IA64_FAULT;
-   }
-#endif // VMAL_NO_FAULT_CHECK
-
-    if (slot >= NDTRS) {
-        isr.val = set_isr_ei_ni(vcpu);
-        isr.code = IA64_RESERVED_REG_FAULT;
-        vcpu_set_isr(vcpu, isr.val);
-        rsv_reg_field(vcpu);
-        return IA64_FAULT;
-    }
-
-    return (vmx_vcpu_itr_d(vcpu,slot,pte,itir,ifa));
-}
-
-static IA64FAULT vmx_emul_itr_i(VCPU *vcpu, INST64 inst)
-{
-    u64 itir, ifa, pte, slot;
-    ISR isr;
-#ifdef  VMAL_NO_FAULT_CHECK
-    IA64_PSR  vpsr;
-    vpsr.val=vmx_vcpu_get_psr(vcpu);
-    if ( vpsr.ic ) {
-        set_illegal_op_isr(vcpu);
-        illegal_op(vcpu);
-        return IA64_FAULT;
-    }
-    if ( vpsr.cpl != 0) {
-        /* Inject Privileged Operation fault into guest */
-        set_privileged_operation_isr (vcpu, 0);
-        privilege_op (vcpu);
-        return IA64_FAULT;
-    }
-#endif // VMAL_NO_FAULT_CHECK
-    
-if(vcpu_get_gr_nat(vcpu,inst.M45.r3,&slot)||vcpu_get_gr_nat(vcpu,inst.M45.r2,&pte)){
-#ifdef  VMAL_NO_FAULT_CHECK
-        set_isr_reg_nat_consumption(vcpu,0,0);
-        rnat_comsumption(vcpu);
-        return IA64_FAULT;
-#endif // VMAL_NO_FAULT_CHECK
-    }
-#ifdef  VMAL_NO_FAULT_CHECK
-    if(is_reserved_rr_register(vcpu, slot)){
-        set_illegal_op_isr(vcpu);
-        illegal_op(vcpu);
-        return IA64_FAULT;
-    }
-#endif // VMAL_NO_FAULT_CHECK
-
-    if (vcpu_get_itir(vcpu,&itir)){
-        return(IA64_FAULT);
-    }
-    if (vcpu_get_ifa(vcpu,&ifa)){
-        return(IA64_FAULT);
-    }
-#ifdef  VMAL_NO_FAULT_CHECK
-    if (is_reserved_itir_field(vcpu, itir)) {
-       // TODO
-       return IA64_FAULT;
-    }
-    if (unimplemented_gva(vcpu,ifa) ) {
-        isr.val = set_isr_ei_ni(vcpu);
-        isr.code = IA64_RESERVED_REG_FAULT;
-        vcpu_set_isr(vcpu, isr.val);
-        unimpl_daddr(vcpu);
-        return IA64_FAULT;
-   }
-#endif // VMAL_NO_FAULT_CHECK
-
-    if (slot >= NITRS) {
-        isr.val = set_isr_ei_ni(vcpu);
-        isr.code = IA64_RESERVED_REG_FAULT;
-        vcpu_set_isr(vcpu, isr.val);
-        rsv_reg_field(vcpu);
-        return IA64_FAULT;
-    }
- 
-   return (vmx_vcpu_itr_i(vcpu,slot,pte,itir,ifa));
-}
-
-static IA64FAULT itc_fault_check(VCPU *vcpu, INST64 inst,
-                                 u64 *itir, u64 *ifa, u64 *pte)
-{
-    IA64FAULT  ret1;
-
-#ifdef  VMAL_NO_FAULT_CHECK
-    IA64_PSR  vpsr;
-    vpsr.val=vmx_vcpu_get_psr(vcpu);
-    if ( vpsr.ic ) {
-        set_illegal_op_isr(vcpu);
-        illegal_op(vcpu);
-        return IA64_FAULT;
-    }
-
-    u64 fault;
-    ISR isr;
-    if ( vpsr.cpl != 0) {
-        /* Inject Privileged Operation fault into guest */
-        set_privileged_operation_isr (vcpu, 0);
-        privilege_op (vcpu);
-        return IA64_FAULT;
-    }
-#endif // VMAL_NO_FAULT_CHECK
-    ret1 = vcpu_get_gr_nat(vcpu,inst.M45.r2,pte);
-#ifdef  VMAL_NO_FAULT_CHECK
-    if( ret1 != IA64_NO_FAULT ){
-        set_isr_reg_nat_consumption(vcpu,0,0);
-        rnat_comsumption(vcpu);
-        return IA64_FAULT;
-    }
-#endif // VMAL_NO_FAULT_CHECK
-
-    if (vcpu_get_itir(vcpu,itir)){
-        return(IA64_FAULT);
-    }
-    if (vcpu_get_ifa(vcpu,ifa)){
-        return(IA64_FAULT);
-    }
-#ifdef  VMAL_NO_FAULT_CHECK
-    if (unimplemented_gva(vcpu,ifa) ) {
-        isr.val = set_isr_ei_ni(vcpu);
-        isr.code = IA64_RESERVED_REG_FAULT;
-        vcpu_set_isr(vcpu, isr.val);
-        unimpl_daddr(vcpu);
-        return IA64_FAULT;
-   }
-#endif // VMAL_NO_FAULT_CHECK
-   return IA64_NO_FAULT;
+    }
+#endif // VMAL_NO_FAULT_CHECK
+    return IA64_NO_FAULT;
 }
 
 static IA64FAULT vmx_emul_itc_d(VCPU *vcpu, INST64 inst)
@@ -752,7 +760,7 @@ static IA64FAULT vmx_emul_itc_d(VCPU *vc
        return IA64_FAULT;
     }
 
-   return (vmx_vcpu_itc_d(vcpu,pte,itir,ifa));
+    return vmx_vcpu_itc_d(vcpu, pte, itir, ifa);
 }
 
 static IA64FAULT vmx_emul_itc_i(VCPU *vcpu, INST64 inst)
@@ -763,8 +771,7 @@ static IA64FAULT vmx_emul_itc_i(VCPU *vc
        return IA64_FAULT;
     }
 
-   return (vmx_vcpu_itc_i(vcpu,pte,itir,ifa));
-
+    return vmx_vcpu_itc_i(vcpu, pte, itir, ifa);
 }
 
 /*************************************
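
The last two hunks show the callers of itc_fault_check(), the helper
(re-indented above) that performs the NaT-consumption, ITIR and IFA fault
checks shared by the itc.d and itc.i emulation paths. A hedged sketch of the
resulting caller shape — the call-site condition is reconstructed from the
helper's signature, not copied from the (elided) context lines:

    static IA64FAULT vmx_emul_itc_d(VCPU *vcpu, INST64 inst)
    {
        u64 itir, ifa, pte;

        /* Shared fault checks; assumed call shape, per the signature
         * itc_fault_check(VCPU *, INST64, u64 *, u64 *, u64 *) */
        if (itc_fault_check(vcpu, inst, &itir, &ifa, &pte) == IA64_FAULT) {
            return IA64_FAULT;
        }

        return vmx_vcpu_itc_d(vcpu, pte, itir, ifa);
    }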

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog