
[Xen-changelog] [xen-unstable] [IA64] Reimplement vcpu_get_psr.



# HG changeset patch
# User Alex Williamson <alex.williamson@xxxxxx>
# Date 1180630128 21600
# Node ID b4cc3fbcdf252c6b21ff22b93a7ea9877d2c0a09
# Parent  919d72f6dc45ca6f7b5b4103fd06abac6bf2b4b7
[IA64] Reimplement vcpu_get_psr.

vcpu_get_psr() now returns the full virtualized PSR directly as a u64; callers
that want the previously returned masked value (the mov-from-psr emulation and
HYPERPRIVOP_GET_PSR) switch to the new vcpu_get_psr_masked().

Signed-off-by: Tristan Gingold <tgingold@xxxxxxx>
---
 xen/arch/ia64/xen/privop.c  |    4 +-
 xen/arch/ia64/xen/vcpu.c    |   88 +++++++++++++++++++++++++-------------------
 xen/include/asm-ia64/vcpu.h |    3 +
 3 files changed, 55 insertions(+), 40 deletions(-)
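
The diff below splits the old interface in two: vcpu_get_psr() now hands back
the full virtualized PSR as a plain u64, and vcpu_get_psr_masked() supplies the
masked view that the old vcpu_get_psr() used to return.  The fragment below is
a minimal illustrative sketch of a caller, patterned on priv_mov_from_psr();
example_read_psr() is hypothetical and not part of the patch, and it assumes
the types and helpers already declared in the Xen ia64 headers (VCPU,
IA64FAULT, vcpu_set_gr(), IA64_NO_FAULT).

/* Hypothetical caller: for illustration only, not code from the tree. */
#include <asm/vcpu.h>

static IA64FAULT example_read_psr(VCPU * vcpu, u64 tgt)
{
	u64 masked;
	u64 full;
	IA64FAULT fault;

	/* Guest-visible value, as delivered when emulating "mov rX = psr". */
	fault = vcpu_get_psr_masked(vcpu, &masked);
	if (fault != IA64_NO_FAULT)
		return fault;

	/* Full virtualized PSR, for callers that need every bit. */
	full = vcpu_get_psr(vcpu);
	(void)full;

	return vcpu_set_gr(vcpu, tgt, masked, 0);
}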

diff -r 919d72f6dc45 -r b4cc3fbcdf25 xen/arch/ia64/xen/privop.c
--- a/xen/arch/ia64/xen/privop.c        Thu May 31 09:45:46 2007 -0600
+++ b/xen/arch/ia64/xen/privop.c        Thu May 31 10:48:48 2007 -0600
@@ -524,7 +524,7 @@ static IA64FAULT priv_mov_from_psr(VCPU 
        u64 val;
        IA64FAULT fault;
 
-       fault = vcpu_get_psr(vcpu, &val);
+       fault = vcpu_get_psr_masked(vcpu, &val);
        if (fault == IA64_NO_FAULT)
                return vcpu_set_gr(vcpu, tgt, val, 0);
        else
@@ -883,7 +883,7 @@ int ia64_hyperprivop(unsigned long iim, 
                vcpu_reset_psr_sm(v, IA64_PSR_BE);
                return 1;
        case HYPERPRIVOP_GET_PSR:
-               vcpu_get_psr(v, &val);
+               vcpu_get_psr_masked(v, &val);
                regs->r8 = val;
                return 1;
        }
diff -r 919d72f6dc45 -r b4cc3fbcdf25 xen/arch/ia64/xen/vcpu.c
--- a/xen/arch/ia64/xen/vcpu.c  Thu May 31 09:45:46 2007 -0600
+++ b/xen/arch/ia64/xen/vcpu.c  Thu May 31 10:48:48 2007 -0600
@@ -448,43 +448,47 @@ IA64FAULT vcpu_set_psr_l(VCPU * vcpu, u6
        return IA64_NO_FAULT;
 }
 
-IA64FAULT vcpu_get_psr(VCPU * vcpu, u64 * pval)
-{
-       REGS *regs = vcpu_regs(vcpu);
-       struct ia64_psr newpsr;
-
-       newpsr = *(struct ia64_psr *)&regs->cr_ipsr;
-       if (!vcpu->vcpu_info->evtchn_upcall_mask)
-               newpsr.i = 1;
-       else
-               newpsr.i = 0;
-       if (PSCB(vcpu, interrupt_collection_enabled))
-               newpsr.ic = 1;
-       else
-               newpsr.ic = 0;
-       if (PSCB(vcpu, metaphysical_mode))
-               newpsr.dt = 0;
-       else
-               newpsr.dt = 1;
-       if (PSCB(vcpu, vpsr_pp))
-               newpsr.pp = 1;
-       else
-               newpsr.pp = 0;
-       newpsr.dfh = PSCB(vcpu, vpsr_dfh);
-
-       *pval = *(unsigned long *)&newpsr;
-       *pval &= (MASK(0, 32) | MASK(35, 2));
-       return IA64_NO_FAULT;
-}
-
-BOOLEAN vcpu_get_psr_ic(VCPU * vcpu)
-{
-       return !!PSCB(vcpu, interrupt_collection_enabled);
-}
-
-BOOLEAN vcpu_get_psr_i(VCPU * vcpu)
-{
-       return !vcpu->vcpu_info->evtchn_upcall_mask;
+u64 vcpu_get_psr(VCPU * vcpu)
+{
+       REGS *regs = vcpu_regs(vcpu);
+       PSR newpsr;
+       PSR ipsr;
+
+       ipsr.i64 = regs->cr_ipsr;
+
+       /* Copy non-virtualized bits.  */
+       newpsr.i64 = ipsr.i64 & (IA64_PSR_BE | IA64_PSR_UP | IA64_PSR_AC |
+                                IA64_PSR_MFL| IA64_PSR_MFH| IA64_PSR_PK |
+                                IA64_PSR_DFL| IA64_PSR_SP | IA64_PSR_DB |
+                                IA64_PSR_LP | IA64_PSR_TB | IA64_PSR_ID |
+                                IA64_PSR_DA | IA64_PSR_DD | IA64_PSR_SS |
+                                IA64_PSR_RI | IA64_PSR_ED | IA64_PSR_IA);
+
+       /* Bits forced to 1 (psr.si and psr.is are forced to 0)  */
+       newpsr.i64 |= IA64_PSR_DI;
+
+       /* System mask.  */
+       newpsr.ia64_psr.ic = PSCB(vcpu, interrupt_collection_enabled);
+       newpsr.ia64_psr.i = !vcpu->vcpu_info->evtchn_upcall_mask;
+
+       if (!PSCB(vcpu, metaphysical_mode))
+               newpsr.i64 |= IA64_PSR_DT | IA64_PSR_RT | IA64_PSR_IT;
+       newpsr.ia64_psr.dfh = PSCB(vcpu, vpsr_dfh);
+       newpsr.ia64_psr.pp = PSCB(vcpu, vpsr_pp);
+
+       /* Fool cpl.  */
+       if (ipsr.ia64_psr.cpl < 3)
+               newpsr.ia64_psr.cpl = 0;
+       newpsr.ia64_psr.bn = PSCB(vcpu, banknum);
+       
+       return newpsr.i64;
+}
+
+IA64FAULT vcpu_get_psr_masked(VCPU * vcpu, u64 * pval)
+{
+       u64 psr = vcpu_get_psr(vcpu);
+       *pval = psr & (MASK(0, 32) | MASK(35, 2));
+       return IA64_NO_FAULT;
 }
 
 u64 vcpu_get_ipsr_int_state(VCPU * vcpu, u64 prevpsr)
@@ -509,6 +513,16 @@ u64 vcpu_get_ipsr_int_state(VCPU * vcpu,
        // psr.pk = 1;
        //printk("returns 0x%016lx...\n",psr.i64);
        return psr.i64;
+}
+
+BOOLEAN vcpu_get_psr_ic(VCPU * vcpu)
+{
+       return !!PSCB(vcpu, interrupt_collection_enabled);
+}
+
+BOOLEAN vcpu_get_psr_i(VCPU * vcpu)
+{
+       return !vcpu->vcpu_info->evtchn_upcall_mask;
 }
 
 /**************************************************************************
diff -r 919d72f6dc45 -r b4cc3fbcdf25 xen/include/asm-ia64/vcpu.h
--- a/xen/include/asm-ia64/vcpu.h       Thu May 31 09:45:46 2007 -0600
+++ b/xen/include/asm-ia64/vcpu.h       Thu May 31 10:48:48 2007 -0600
@@ -42,7 +42,8 @@ extern IA64FAULT vcpu_get_ar(VCPU * vcpu
 /* psr */
 extern BOOLEAN vcpu_get_psr_ic(VCPU * vcpu);
 extern u64 vcpu_get_ipsr_int_state(VCPU * vcpu, u64 prevpsr);
-extern IA64FAULT vcpu_get_psr(VCPU * vcpu, u64 * pval);
+extern u64 vcpu_get_psr(VCPU * vcpu);
+extern IA64FAULT vcpu_get_psr_masked(VCPU * vcpu, u64 * pval);
 extern IA64FAULT vcpu_reset_psr_sm(VCPU * vcpu, u64 imm);
 extern IA64FAULT vcpu_set_psr_sm(VCPU * vcpu, u64 imm);
 extern IA64FAULT vcpu_set_psr_l(VCPU * vcpu, u64 val);
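
A note on the mask in vcpu_get_psr_masked(): assuming MASK(pos, len) expands to
((1UL << (len)) - 1) << (pos), as is the usual convention in vcpu.c, and the
architectural PSR layout (cpl in bits 32-33, is in bit 34, mc in bit 35, it in
bit 36), the expression MASK(0, 32) | MASK(35, 2) keeps PSR bits 0-31 plus bits
35-36 and drops cpl, is and everything above bit 36, which is the subset the
mov-from-psr emulation reports after this change.  The standalone sketch below
is illustrative only (its local MASK() definition is an assumption, not the
macro from the tree) and simply prints which of the interesting bits survive.

#include <stdio.h>

/* Assumed to mirror the MASK() convention used in vcpu.c. */
#define MASK(pos, len)	(((1UL << (len)) - 1) << (pos))

int main(void)
{
	unsigned long keep = MASK(0, 32) | MASK(35, 2);
	int bit;

	printf("mask = 0x%016lx\n", keep);	/* 0x00000018ffffffff */
	for (bit = 32; bit <= 37; bit++)	/* cpl0, cpl1, is, mc, it, id */
		printf("bit %2d: %s\n", bit,
		       (keep & (1UL << bit)) ? "kept" : "dropped");
	return 0;
}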
