
[Xen-changelog] [xen-unstable] [IA64] deobfuscate vcpu.c and privop.c and related headers



# HG changeset patch
# User awilliam@xxxxxxxxxxx
# Node ID 78c494a16b951c96452cc395dd49ac1d228b3dcd
# Parent  c5ddcf89f050e795e9700f7a400b292a1b2cd8e5
[IA64] deobfuscate vcpu.c and privop.c and related headers

De-obfuscate vcpu.c, privop.c and related header files to make them
easier to debug and read. In addition, eliminate the bogus UINT64 and
UINT data types, which exist only in the ia64 tree and are entirely
unnecessary since we already have u64.

Signed-off-by: Jes Sorensen <jes@xxxxxxx>
---
 xen/arch/ia64/vmx/pal_emul.c        |    6 
 xen/arch/ia64/vmx/vlsapic.c         |    6 
 xen/arch/ia64/vmx/vmmu.c            |   26 
 xen/arch/ia64/vmx/vmx_phy_mode.c    |    4 
 xen/arch/ia64/vmx/vmx_process.c     |   14 
 xen/arch/ia64/vmx/vmx_vcpu.c        |   38 
 xen/arch/ia64/vmx/vmx_virt.c        |   30 
 xen/arch/ia64/xen/privop.c          | 1039 +++++++++--------
 xen/arch/ia64/xen/vcpu.c            | 2128 +++++++++++++++++++-----------------
 xen/arch/ia64/xen/vhpt.c            |    2 
 xen/include/asm-ia64/dom_fw.h       |    2 
 xen/include/asm-ia64/privop.h       |    4 
 xen/include/asm-ia64/vcpu.h         |  314 ++---
 xen/include/asm-ia64/vmx_pal_vsa.h  |    7 
 xen/include/asm-ia64/vmx_phy_mode.h |    7 
 xen/include/asm-ia64/vmx_vcpu.h     |  805 ++++++-------
 16 files changed, 2310 insertions(+), 2122 deletions(-)
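
The substitution is mechanical: UINT64 and UINT were ia64-only typedef
aliases for the generic u64 and unsigned int, so every declaration in
the hunks below rewrites one-for-one and the generated code should not
change. A minimal sketch of the pattern, using a hypothetical function
name that is not taken from the patch:

    /* The ia64 tree effectively carried redundant aliases: */
    typedef u64 UINT64;           /* identical representation to u64 */
    typedef unsigned int UINT;    /* identical to unsigned int */

    /* Before the cleanup: */
    static IA64FAULT demo_get(VCPU *vcpu, UINT64 reg, UINT64 *pval);

    /* After the cleanup: */
    static IA64FAULT demo_get(VCPU *vcpu, u64 reg, u64 *pval);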

diff -r c5ddcf89f050 -r 78c494a16b95 xen/arch/ia64/vmx/pal_emul.c
--- a/xen/arch/ia64/vmx/pal_emul.c      Tue Oct 17 14:30:36 2006 -0600
+++ b/xen/arch/ia64/vmx/pal_emul.c      Tue Oct 17 15:43:41 2006 -0600
@@ -48,7 +48,7 @@
        }
 
 static void
-get_pal_parameters(VCPU *vcpu, UINT64 *gr29, UINT64 *gr30, UINT64 *gr31) {
+get_pal_parameters(VCPU *vcpu, u64 *gr29, u64 *gr30, u64 *gr31) {
 
        vcpu_get_gr_nat(vcpu,29,gr29);
        vcpu_get_gr_nat(vcpu,30,gr30); 
@@ -75,7 +75,7 @@ set_sal_result(VCPU *vcpu,struct sal_ret
 
 static struct ia64_pal_retval
 pal_cache_flush(VCPU *vcpu) {
-       UINT64 gr28,gr29, gr30, gr31;
+       u64 gr28,gr29, gr30, gr31;
        struct ia64_pal_retval result;
 
        get_pal_parameters(vcpu, &gr29, &gr30, &gr31);
@@ -384,7 +384,7 @@ pal_vm_page_size(VCPU *vcpu) {
 
 void
 pal_emul(VCPU *vcpu) {
-       UINT64 gr28;
+       u64 gr28;
        struct ia64_pal_retval result;
 
        vcpu_get_gr_nat(vcpu,28,&gr28);  //bank1
diff -r c5ddcf89f050 -r 78c494a16b95 xen/arch/ia64/vmx/vlsapic.c
--- a/xen/arch/ia64/vmx/vlsapic.c       Tue Oct 17 14:30:36 2006 -0600
+++ b/xen/arch/ia64/vmx/vlsapic.c       Tue Oct 17 15:43:41 2006 -0600
@@ -49,8 +49,8 @@
  * Update the checked last_itc.
  */
 
-extern void vmx_reflect_interruption(UINT64 ifa,UINT64 isr,UINT64 iim,
-     UINT64 vector,REGS *regs);
+extern void vmx_reflect_interruption(u64 ifa, u64 isr, u64 iim,
+                                     u64 vector, REGS *regs);
 static void update_last_itc(vtime_t *vtm, uint64_t cur_itc)
 {
     vtm->last_itc = cur_itc;
@@ -533,7 +533,7 @@ int vmx_vcpu_pend_interrupt(VCPU *vcpu, 
  * The interrupt source is contained in pend_irr[0-3] with
  * each bits stand for one interrupt.
  */
-void vmx_vcpu_pend_batch_interrupt(VCPU *vcpu, UINT64 *pend_irr)
+void vmx_vcpu_pend_batch_interrupt(VCPU *vcpu, u64 *pend_irr)
 {
     uint64_t    spsr;
     int     i;
diff -r c5ddcf89f050 -r 78c494a16b95 xen/arch/ia64/vmx/vmmu.c
--- a/xen/arch/ia64/vmx/vmmu.c  Tue Oct 17 14:30:36 2006 -0600
+++ b/xen/arch/ia64/vmx/vmmu.c  Tue Oct 17 15:43:41 2006 -0600
@@ -363,7 +363,7 @@ fetch_code(VCPU *vcpu, u64 gip, IA64_BUN
     return IA64_NO_FAULT;
 }
 
-IA64FAULT vmx_vcpu_itc_i(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa)
+IA64FAULT vmx_vcpu_itc_i(VCPU *vcpu, u64 pte, u64 itir, u64 ifa)
 {
 #ifdef VTLB_DEBUG
     int slot;
@@ -382,7 +382,7 @@ IA64FAULT vmx_vcpu_itc_i(VCPU *vcpu, UIN
     return IA64_NO_FAULT;
 }
 
-IA64FAULT vmx_vcpu_itc_d(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa)
+IA64FAULT vmx_vcpu_itc_d(VCPU *vcpu, u64 pte, u64 itir, u64 ifa)
 {
     u64 gpfn;
 #ifdef VTLB_DEBUG    
@@ -478,7 +478,7 @@ IA64FAULT vmx_vcpu_itr_d(VCPU *vcpu, u64
 
 
 
-IA64FAULT vmx_vcpu_ptr_d(VCPU *vcpu,UINT64 ifa,UINT64 ps)
+IA64FAULT vmx_vcpu_ptr_d(VCPU *vcpu,u64 ifa, u64 ps)
 {
     int index;
     u64 va;
@@ -491,7 +491,7 @@ IA64FAULT vmx_vcpu_ptr_d(VCPU *vcpu,UINT
     return IA64_NO_FAULT;
 }
 
-IA64FAULT vmx_vcpu_ptr_i(VCPU *vcpu,UINT64 ifa,UINT64 ps)
+IA64FAULT vmx_vcpu_ptr_i(VCPU *vcpu, u64 ifa, u64 ps)
 {
     int index;
     u64 va;
@@ -504,7 +504,7 @@ IA64FAULT vmx_vcpu_ptr_i(VCPU *vcpu,UINT
     return IA64_NO_FAULT;
 }
 
-IA64FAULT vmx_vcpu_ptc_l(VCPU *vcpu, UINT64 va, UINT64 ps)
+IA64FAULT vmx_vcpu_ptc_l(VCPU *vcpu, u64 va, u64 ps)
 {
     va = PAGEALIGN(va, ps);
     thash_purge_entries(vcpu, va, ps);
@@ -512,19 +512,19 @@ IA64FAULT vmx_vcpu_ptc_l(VCPU *vcpu, UIN
 }
 
 
-IA64FAULT vmx_vcpu_ptc_e(VCPU *vcpu, UINT64 va)
+IA64FAULT vmx_vcpu_ptc_e(VCPU *vcpu, u64 va)
 {
     thash_purge_all(vcpu);
     return IA64_NO_FAULT;
 }
 
-IA64FAULT vmx_vcpu_ptc_g(VCPU *vcpu, UINT64 va, UINT64 ps)
+IA64FAULT vmx_vcpu_ptc_g(VCPU *vcpu, u64 va, u64 ps)
 {
     vmx_vcpu_ptc_ga(vcpu, va, ps);
     return IA64_ILLOP_FAULT;
 }
 /*
-IA64FAULT vmx_vcpu_ptc_ga(VCPU *vcpu,UINT64 va,UINT64 ps)
+IA64FAULT vmx_vcpu_ptc_ga(VCPU *vcpu, u64 va, u64 ps)
 {
     vmx_vcpu_ptc_l(vcpu, va, ps);
     return IA64_NO_FAULT;
@@ -562,7 +562,7 @@ static void ptc_ga_remote_func (void *va
 }
 
 
-IA64FAULT vmx_vcpu_ptc_ga(VCPU *vcpu,UINT64 va,UINT64 ps)
+IA64FAULT vmx_vcpu_ptc_ga(VCPU *vcpu, u64 va, u64 ps)
 {
 
     struct domain *d = vcpu->domain;
@@ -596,7 +596,7 @@ IA64FAULT vmx_vcpu_ptc_ga(VCPU *vcpu,UIN
 }
 
 
-IA64FAULT vmx_vcpu_thash(VCPU *vcpu, UINT64 vadr, UINT64 *pval)
+IA64FAULT vmx_vcpu_thash(VCPU *vcpu, u64 vadr, u64 *pval)
 {
     PTA vpta;
     ia64_rr vrr;
@@ -616,7 +616,7 @@ IA64FAULT vmx_vcpu_thash(VCPU *vcpu, UIN
 }
 
 
-IA64FAULT vmx_vcpu_ttag(VCPU *vcpu, UINT64 vadr, UINT64 *pval)
+IA64FAULT vmx_vcpu_ttag(VCPU *vcpu, u64 vadr, u64 *pval)
 {
     ia64_rr vrr;
     PTA vpta;
@@ -632,7 +632,7 @@ IA64FAULT vmx_vcpu_ttag(VCPU *vcpu, UINT
 
 
 
-IA64FAULT vmx_vcpu_tpa(VCPU *vcpu, UINT64 vadr, UINT64 *padr)
+IA64FAULT vmx_vcpu_tpa(VCPU *vcpu, u64 vadr, u64 *padr)
 {
     thash_data_t *data;
     ISR visr,pt_isr;
@@ -718,7 +718,7 @@ IA64FAULT vmx_vcpu_tpa(VCPU *vcpu, UINT6
     }
 }
 
-IA64FAULT vmx_vcpu_tak(VCPU *vcpu, UINT64 vadr, UINT64 *key)
+IA64FAULT vmx_vcpu_tak(VCPU *vcpu, u64 vadr, u64 *key)
 {
     thash_data_t *data;
     PTA vpta;
diff -r c5ddcf89f050 -r 78c494a16b95 xen/arch/ia64/vmx/vmx_phy_mode.c
--- a/xen/arch/ia64/vmx/vmx_phy_mode.c  Tue Oct 17 14:30:36 2006 -0600
+++ b/xen/arch/ia64/vmx/vmx_phy_mode.c  Tue Oct 17 15:43:41 2006 -0600
@@ -204,7 +204,7 @@ void
 void
 switch_to_physical_rid(VCPU *vcpu)
 {
-    UINT64 psr;
+    u64 psr;
     /* Save original virtual mode rr[0] and rr[4] */
     psr=ia64_clear_ic();
     ia64_set_rr(VRN0<<VRN_SHIFT, vcpu->arch.metaphysical_rr0);
@@ -221,7 +221,7 @@ void
 void
 switch_to_virtual_rid(VCPU *vcpu)
 {
-    UINT64 psr;
+    u64 psr;
     psr=ia64_clear_ic();
     ia64_set_rr(VRN0<<VRN_SHIFT, vcpu->arch.metaphysical_saved_rr0);
     ia64_srlz_d();
diff -r c5ddcf89f050 -r 78c494a16b95 xen/arch/ia64/vmx/vmx_process.c
--- a/xen/arch/ia64/vmx/vmx_process.c   Tue Oct 17 14:30:36 2006 -0600
+++ b/xen/arch/ia64/vmx/vmx_process.c   Tue Oct 17 15:43:41 2006 -0600
@@ -66,7 +66,7 @@ extern unsigned long handle_fpu_swa (int
 #define DOMN_PAL_REQUEST    0x110000
 #define DOMN_SAL_REQUEST    0x110001
 
-static UINT64 vec2off[68] = {0x0,0x400,0x800,0xc00,0x1000, 0x1400,0x1800,
+static u64 vec2off[68] = {0x0,0x400,0x800,0xc00,0x1000,0x1400,0x1800,
     0x1c00,0x2000,0x2400,0x2800,0x2c00,0x3000,0x3400,0x3800,0x3c00,0x4000,
     0x4400,0x4800,0x4c00,0x5000,0x5100,0x5200,0x5300,0x5400,0x5500,0x5600,
     0x5700,0x5800,0x5900,0x5a00,0x5b00,0x5c00,0x5d00,0x5e00,0x5f00,0x6000,
@@ -78,12 +78,12 @@ static UINT64 vec2off[68] = {0x0,0x400,0
 
 
 
-void vmx_reflect_interruption(UINT64 ifa,UINT64 isr,UINT64 iim,
-     UINT64 vector,REGS *regs)
-{
-    UINT64 status;
+void vmx_reflect_interruption(u64 ifa, u64 isr, u64 iim,
+                              u64 vector, REGS *regs)
+{
+    u64 status;
     VCPU *vcpu = current;
-    UINT64 vpsr = VCPU(vcpu, vpsr);
+    u64 vpsr = VCPU(vcpu, vpsr);
     vector=vec2off[vector];
     if(!(vpsr&IA64_PSR_IC)&&(vector!=IA64_DATA_NESTED_TLB_VECTOR)){
         panic_domain(regs, "Guest nested fault vector=%lx!\n", vector);
@@ -253,7 +253,7 @@ void leave_hypervisor_tail(struct pt_reg
     }
 }
 
-extern ia64_rr vmx_vcpu_rr(VCPU *vcpu,UINT64 vadr);
+extern ia64_rr vmx_vcpu_rr(VCPU *vcpu, u64 vadr);
 
 static int vmx_handle_lds(REGS* regs)
 {
diff -r c5ddcf89f050 -r 78c494a16b95 xen/arch/ia64/vmx/vmx_vcpu.c
--- a/xen/arch/ia64/vmx/vmx_vcpu.c      Tue Oct 17 14:30:36 2006 -0600
+++ b/xen/arch/ia64/vmx/vmx_vcpu.c      Tue Oct 17 15:43:41 2006 -0600
@@ -82,7 +82,7 @@ vmx_vcpu_set_psr(VCPU *vcpu, unsigned lo
 vmx_vcpu_set_psr(VCPU *vcpu, unsigned long value)
 {
 
-    UINT64 mask;
+    u64 mask;
     REGS *regs;
     IA64_PSR old_psr, new_psr;
     old_psr.val=VCPU(vcpu, vpsr);
@@ -208,7 +208,7 @@ vmx_vcpu_get_plat(VCPU *vcpu)
 
 
 
-IA64FAULT vmx_vcpu_set_rr(VCPU *vcpu, UINT64 reg, UINT64 val)
+IA64FAULT vmx_vcpu_set_rr(VCPU *vcpu, u64 reg, u64 val)
 {
     ia64_rr oldrr,newrr;
     extern void * pal_vaddr;
@@ -252,14 +252,14 @@ IA64FAULT vmx_vcpu_set_rr(VCPU *vcpu, UI
  VCPU protection key register access routines
 **************************************************************************/
 
-IA64FAULT vmx_vcpu_get_pkr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
-{
-    UINT64 val = (UINT64)ia64_get_pkr(reg);
+IA64FAULT vmx_vcpu_get_pkr(VCPU *vcpu, u64 reg, u64 *pval)
+{
+    u64 val = (u64)ia64_get_pkr(reg);
     *pval = val;
     return (IA64_NO_FAULT);
 }
 
-IA64FAULT vmx_vcpu_set_pkr(VCPU *vcpu, UINT64 reg, UINT64 val)
+IA64FAULT vmx_vcpu_set_pkr(VCPU *vcpu, u64 reg, u64 val)
 {
     ia64_set_pkr(reg,val);
     return (IA64_NO_FAULT);
@@ -295,7 +295,7 @@ IA64FAULT vmx_vcpu_rfi(VCPU *vcpu)
 IA64FAULT vmx_vcpu_rfi(VCPU *vcpu)
 {
     // TODO: Only allowed for current vcpu
-    UINT64 ifs, psr;
+    u64 ifs, psr;
     REGS *regs = vcpu_regs(vcpu);
     psr = VCPU(vcpu,ipsr);
     if (psr & IA64_PSR_BN)
@@ -313,7 +313,7 @@ IA64FAULT vmx_vcpu_rfi(VCPU *vcpu)
 
 #if 0
 IA64FAULT
-vmx_vcpu_get_bgr(VCPU *vcpu, unsigned int reg, UINT64 *val)
+vmx_vcpu_get_bgr(VCPU *vcpu, unsigned int reg, u64 *val)
 {
     IA64_PSR vpsr;
 
@@ -366,7 +366,7 @@ vmx_vcpu_set_bgr(VCPU *vcpu, unsigned in
 #endif
 #if 0
 IA64FAULT
-vmx_vcpu_get_gr(VCPU *vcpu, unsigned reg, UINT64 * val)
+vmx_vcpu_get_gr(VCPU *vcpu, unsigned reg, u64 * val)
 {
     REGS *regs=vcpu_regs(vcpu);
     int nat;
@@ -413,18 +413,18 @@ vmx_vcpu_set_gr(VCPU *vcpu, unsigned reg
     This function gets guest PSR
  */
 
-UINT64 vmx_vcpu_get_psr(VCPU *vcpu)
-{
-    UINT64 mask;
+u64 vmx_vcpu_get_psr(VCPU *vcpu)
+{
+    u64 mask;
     REGS *regs = vcpu_regs(vcpu);
     mask = IA64_PSR_BE | IA64_PSR_UP | IA64_PSR_AC | IA64_PSR_MFL |
            IA64_PSR_MFH | IA64_PSR_CPL | IA64_PSR_RI;
     return (VCPU(vcpu, vpsr) & ~mask) | (regs->cr_ipsr & mask);
 }
 
-IA64FAULT vmx_vcpu_reset_psr_sm(VCPU *vcpu, UINT64 imm24)
-{
-    UINT64 vpsr;
+IA64FAULT vmx_vcpu_reset_psr_sm(VCPU *vcpu, u64 imm24)
+{
+    u64 vpsr;
     vpsr = vmx_vcpu_get_psr(vcpu);
     vpsr &= (~imm24);
     vmx_vcpu_set_psr(vcpu, vpsr);
@@ -432,9 +432,9 @@ IA64FAULT vmx_vcpu_reset_psr_sm(VCPU *vc
 }
 
 
-IA64FAULT vmx_vcpu_set_psr_sm(VCPU *vcpu, UINT64 imm24)
-{
-    UINT64 vpsr;
+IA64FAULT vmx_vcpu_set_psr_sm(VCPU *vcpu, u64 imm24)
+{
+    u64 vpsr;
     vpsr = vmx_vcpu_get_psr(vcpu);
     vpsr |= imm24;
     vmx_vcpu_set_psr(vcpu, vpsr);
@@ -442,7 +442,7 @@ IA64FAULT vmx_vcpu_set_psr_sm(VCPU *vcpu
 }
 
 
-IA64FAULT vmx_vcpu_set_psr_l(VCPU *vcpu, UINT64 val)
+IA64FAULT vmx_vcpu_set_psr_l(VCPU *vcpu, u64 val)
 {
     val = (val & MASK(0, 32)) | (vmx_vcpu_get_psr(vcpu) & MASK(32, 32));
     vmx_vcpu_set_psr(vcpu, val);
diff -r c5ddcf89f050 -r 78c494a16b95 xen/arch/ia64/vmx/vmx_virt.c
--- a/xen/arch/ia64/vmx/vmx_virt.c      Tue Oct 17 14:30:36 2006 -0600
+++ b/xen/arch/ia64/vmx/vmx_virt.c      Tue Oct 17 15:43:41 2006 -0600
@@ -32,7 +32,7 @@
 #include <asm/vmx_phy_mode.h>
 
 void
-ia64_priv_decoder(IA64_SLOT_TYPE slot_type, INST64 inst, UINT64  * cause)
+ia64_priv_decoder(IA64_SLOT_TYPE slot_type, INST64 inst, u64 * cause)
 {
     *cause=0;
     switch (slot_type) {
@@ -144,20 +144,20 @@ ia64_priv_decoder(IA64_SLOT_TYPE slot_ty
 
 IA64FAULT vmx_emul_rsm(VCPU *vcpu, INST64 inst)
 {
-    UINT64 imm24 = (inst.M44.i<<23)|(inst.M44.i2<<21)|inst.M44.imm;
+    u64 imm24 = (inst.M44.i << 23) | (inst.M44.i2 << 21) | inst.M44.imm;
     return vmx_vcpu_reset_psr_sm(vcpu,imm24);
 }
 
 IA64FAULT vmx_emul_ssm(VCPU *vcpu, INST64 inst)
 {
-    UINT64 imm24 = (inst.M44.i<<23)|(inst.M44.i2<<21)|inst.M44.imm;
+    u64 imm24 = (inst.M44.i << 23) | (inst.M44.i2 << 21) | inst.M44.imm;
     return vmx_vcpu_set_psr_sm(vcpu,imm24);
 }
 
 IA64FAULT vmx_emul_mov_from_psr(VCPU *vcpu, INST64 inst)
 {
-    UINT64 tgt = inst.M33.r1;
-    UINT64 val;
+    u64 tgt = inst.M33.r1;
+    u64 val;
 
 /*
     if ((fault = vmx_vcpu_get_psr(vcpu,&val)) == IA64_NO_FAULT)
@@ -174,7 +174,7 @@ IA64FAULT vmx_emul_mov_from_psr(VCPU *vc
  */
 IA64FAULT vmx_emul_mov_to_psr(VCPU *vcpu, INST64 inst)
 {
-    UINT64 val;
+    u64 val;
 
     if(vcpu_get_gr_nat(vcpu, inst.M35.r2, &val) != IA64_NO_FAULT)
        panic_domain(vcpu_regs(vcpu),"get_psr nat bit fault\n");
@@ -566,7 +566,7 @@ IA64FAULT vmx_emul_tak(VCPU *vcpu, INST6
 
 IA64FAULT vmx_emul_itr_d(VCPU *vcpu, INST64 inst)
 {
-    UINT64 itir, ifa, pte, slot;
+    u64 itir, ifa, pte, slot;
 #ifdef  VMAL_NO_FAULT_CHECK
     IA64_PSR  vpsr;
     vpsr.val=vmx_vcpu_get_psr(vcpu);
@@ -623,7 +623,7 @@ IA64FAULT vmx_emul_itr_d(VCPU *vcpu, INS
 
 IA64FAULT vmx_emul_itr_i(VCPU *vcpu, INST64 inst)
 {
-    UINT64 itir, ifa, pte, slot;
+    u64 itir, ifa, pte, slot;
 #ifdef  VMAL_NO_FAULT_CHECK
     ISR isr;
     IA64_PSR  vpsr;
@@ -691,7 +691,7 @@ IA64FAULT itc_fault_check(VCPU *vcpu, IN
         return IA64_FAULT;
     }
 
-    UINT64 fault;
+    u64 fault;
     ISR isr;
     if ( vpsr.cpl != 0) {
         /* Inject Privileged Operation fault into guest */
@@ -729,7 +729,7 @@ IA64FAULT itc_fault_check(VCPU *vcpu, IN
 
 IA64FAULT vmx_emul_itc_d(VCPU *vcpu, INST64 inst)
 {
-    UINT64 itir, ifa, pte;
+    u64 itir, ifa, pte;
 
     if ( itc_fault_check(vcpu, inst, &itir, &ifa, &pte) == IA64_FAULT ) {
        return IA64_FAULT;
@@ -740,7 +740,7 @@ IA64FAULT vmx_emul_itc_d(VCPU *vcpu, INS
 
 IA64FAULT vmx_emul_itc_i(VCPU *vcpu, INST64 inst)
 {
-    UINT64 itir, ifa, pte;
+    u64 itir, ifa, pte;
 
     if ( itc_fault_check(vcpu, inst, &itir, &ifa, &pte) == IA64_FAULT ) {
        return IA64_FAULT;
@@ -757,7 +757,7 @@ IA64FAULT vmx_emul_mov_to_ar_imm(VCPU *v
 IA64FAULT vmx_emul_mov_to_ar_imm(VCPU *vcpu, INST64 inst)
 {
     // I27 and M30 are identical for these fields
-    UINT64  imm;
+    u64 imm;
 
     if(inst.M30.ar3!=44){
         panic_domain(vcpu_regs(vcpu),"Can't support ar register other than itc");
@@ -1277,8 +1277,8 @@ IA64FAULT vmx_emul_mov_to_cr(VCPU *vcpu,
 
 IA64FAULT vmx_emul_mov_from_cr(VCPU *vcpu, INST64 inst)
 {
-    UINT64 tgt = inst.M33.r1;
-    UINT64 val;
+    u64 tgt = inst.M33.r1;
+    u64 val;
     IA64FAULT fault;
 #ifdef  CHECK_FAULT
     IA64_PSR vpsr;
@@ -1353,7 +1353,7 @@ vmx_emulate(VCPU *vcpu, REGS *regs)
 {
     IA64FAULT status;
     INST64 inst;
-    UINT64 iip, cause, opcode;
+    u64 iip, cause, opcode;
     iip = regs->cr_iip;
     cause = VMX(vcpu,cause);
     opcode = VMX(vcpu,opcode);
diff -r c5ddcf89f050 -r 78c494a16b95 xen/arch/ia64/xen/privop.c
--- a/xen/arch/ia64/xen/privop.c        Tue Oct 17 14:30:36 2006 -0600
+++ b/xen/arch/ia64/xen/privop.c        Tue Oct 17 15:43:41 2006 -0600
@@ -9,13 +9,13 @@
 #include <asm/privop.h>
 #include <asm/vcpu.h>
 #include <asm/processor.h>
-#include <asm/delay.h> // Debug only
+#include <asm/delay.h>         // Debug only
 #include <asm/dom_fw.h>
 #include <asm/vhpt.h>
 #include <asm/bundle.h>
 #include <xen/perfc.h>
 
-long priv_verbose=0;
+long priv_verbose = 0;
 unsigned long privop_trace = 0;
 
 /* Set to 1 to handle privified instructions from the privify tool. */
@@ -29,200 +29,205 @@ Privileged operation emulation routines
 Privileged operation emulation routines
 **************************************************************************/
 
-static IA64FAULT priv_rfi(VCPU *vcpu, INST64 inst)
+static IA64FAULT priv_rfi(VCPU * vcpu, INST64 inst)
 {
        return vcpu_rfi(vcpu);
 }
 
-static IA64FAULT priv_bsw0(VCPU *vcpu, INST64 inst)
+static IA64FAULT priv_bsw0(VCPU * vcpu, INST64 inst)
 {
        return vcpu_bsw0(vcpu);
 }
 
-static IA64FAULT priv_bsw1(VCPU *vcpu, INST64 inst)
+static IA64FAULT priv_bsw1(VCPU * vcpu, INST64 inst)
 {
        return vcpu_bsw1(vcpu);
 }
 
-static IA64FAULT priv_cover(VCPU *vcpu, INST64 inst)
+static IA64FAULT priv_cover(VCPU * vcpu, INST64 inst)
 {
        return vcpu_cover(vcpu);
 }
 
-static IA64FAULT priv_ptc_l(VCPU *vcpu, INST64 inst)
-{
-       UINT64 vadr = vcpu_get_gr(vcpu,inst.M45.r3);
-       UINT64 log_range;
-
-       log_range = ((vcpu_get_gr(vcpu,inst.M45.r2) & 0xfc) >> 2);
-       return vcpu_ptc_l(vcpu,vadr,log_range);
-}
-
-static IA64FAULT priv_ptc_e(VCPU *vcpu, INST64 inst)
-{
-       UINT src = inst.M28.r3;
+static IA64FAULT priv_ptc_l(VCPU * vcpu, INST64 inst)
+{
+       u64 vadr = vcpu_get_gr(vcpu, inst.M45.r3);
+       u64 log_range;
+
+       log_range = ((vcpu_get_gr(vcpu, inst.M45.r2) & 0xfc) >> 2);
+       return vcpu_ptc_l(vcpu, vadr, log_range);
+}
+
+static IA64FAULT priv_ptc_e(VCPU * vcpu, INST64 inst)
+{
+       unsigned int src = inst.M28.r3;
 
        // NOTE: ptc_e with source gr > 63 is emulated as a fc r(y-64)
        if (privify_en && src > 63)
-               return(vcpu_fc(vcpu,vcpu_get_gr(vcpu,src - 64)));
-       return vcpu_ptc_e(vcpu,vcpu_get_gr(vcpu,src));
-}
-
-static IA64FAULT priv_ptc_g(VCPU *vcpu, INST64 inst)
-{
-       UINT64 vadr = vcpu_get_gr(vcpu,inst.M45.r3);
-       UINT64 addr_range;
-
-       addr_range = 1 << ((vcpu_get_gr(vcpu,inst.M45.r2) & 0xfc) >> 2);
-       return vcpu_ptc_g(vcpu,vadr,addr_range);
-}
-
-static IA64FAULT priv_ptc_ga(VCPU *vcpu, INST64 inst)
-{
-       UINT64 vadr = vcpu_get_gr(vcpu,inst.M45.r3);
-       UINT64 addr_range;
-
-       addr_range = 1 << ((vcpu_get_gr(vcpu,inst.M45.r2) & 0xfc) >> 2);
-       return vcpu_ptc_ga(vcpu,vadr,addr_range);
-}
-
-static IA64FAULT priv_ptr_d(VCPU *vcpu, INST64 inst)
-{
-       UINT64 vadr = vcpu_get_gr(vcpu,inst.M45.r3);
-       UINT64 log_range;
-
-       log_range = (vcpu_get_gr(vcpu,inst.M45.r2) & 0xfc) >> 2;
-       return vcpu_ptr_d(vcpu,vadr,log_range);
-}
-
-static IA64FAULT priv_ptr_i(VCPU *vcpu, INST64 inst)
-{
-       UINT64 vadr = vcpu_get_gr(vcpu,inst.M45.r3);
-       UINT64 log_range;
-
-       log_range = (vcpu_get_gr(vcpu,inst.M45.r2) & 0xfc) >> 2;
-       return vcpu_ptr_i(vcpu,vadr,log_range);
-}
-
-static IA64FAULT priv_tpa(VCPU *vcpu, INST64 inst)
-{
-       UINT64 padr;
-       UINT fault;
-       UINT src = inst.M46.r3;
+               return vcpu_fc(vcpu, vcpu_get_gr(vcpu, src - 64));
+       return vcpu_ptc_e(vcpu, vcpu_get_gr(vcpu, src));
+}
+
+static IA64FAULT priv_ptc_g(VCPU * vcpu, INST64 inst)
+{
+       u64 vadr = vcpu_get_gr(vcpu, inst.M45.r3);
+       u64 addr_range;
+
+       addr_range = 1 << ((vcpu_get_gr(vcpu, inst.M45.r2) & 0xfc) >> 2);
+       return vcpu_ptc_g(vcpu, vadr, addr_range);
+}
+
+static IA64FAULT priv_ptc_ga(VCPU * vcpu, INST64 inst)
+{
+       u64 vadr = vcpu_get_gr(vcpu, inst.M45.r3);
+       u64 addr_range;
+
+       addr_range = 1 << ((vcpu_get_gr(vcpu, inst.M45.r2) & 0xfc) >> 2);
+       return vcpu_ptc_ga(vcpu, vadr, addr_range);
+}
+
+static IA64FAULT priv_ptr_d(VCPU * vcpu, INST64 inst)
+{
+       u64 vadr = vcpu_get_gr(vcpu, inst.M45.r3);
+       u64 log_range;
+
+       log_range = (vcpu_get_gr(vcpu, inst.M45.r2) & 0xfc) >> 2;
+       return vcpu_ptr_d(vcpu, vadr, log_range);
+}
+
+static IA64FAULT priv_ptr_i(VCPU * vcpu, INST64 inst)
+{
+       u64 vadr = vcpu_get_gr(vcpu, inst.M45.r3);
+       u64 log_range;
+
+       log_range = (vcpu_get_gr(vcpu, inst.M45.r2) & 0xfc) >> 2;
+       return vcpu_ptr_i(vcpu, vadr, log_range);
+}
+
+static IA64FAULT priv_tpa(VCPU * vcpu, INST64 inst)
+{
+       u64 padr;
+       unsigned int fault;
+       unsigned int src = inst.M46.r3;
 
        // NOTE: tpa with source gr > 63 is emulated as a ttag rx=r(y-64)
        if (privify_en && src > 63)
-               fault = vcpu_ttag(vcpu,vcpu_get_gr(vcpu,src-64),&padr);
-       else fault = vcpu_tpa(vcpu,vcpu_get_gr(vcpu,src),&padr);
+               fault = vcpu_ttag(vcpu, vcpu_get_gr(vcpu, src - 64), &padr);
+       else
+               fault = vcpu_tpa(vcpu, vcpu_get_gr(vcpu, src), &padr);
        if (fault == IA64_NO_FAULT)
                return vcpu_set_gr(vcpu, inst.M46.r1, padr, 0);
-       else return fault;
-}
-
-static IA64FAULT priv_tak(VCPU *vcpu, INST64 inst)
-{
-       UINT64 key;
-       UINT fault;
-       UINT src = inst.M46.r3;
+       else
+               return fault;
+}
+
+static IA64FAULT priv_tak(VCPU * vcpu, INST64 inst)
+{
+       u64 key;
+       unsigned int fault;
+       unsigned int src = inst.M46.r3;
 
        // NOTE: tak with source gr > 63 is emulated as a thash rx=r(y-64)
        if (privify_en && src > 63)
-               fault = vcpu_thash(vcpu,vcpu_get_gr(vcpu,src-64),&key);
-       else fault = vcpu_tak(vcpu,vcpu_get_gr(vcpu,src),&key);
+               fault = vcpu_thash(vcpu, vcpu_get_gr(vcpu, src - 64), &key);
+       else
+               fault = vcpu_tak(vcpu, vcpu_get_gr(vcpu, src), &key);
        if (fault == IA64_NO_FAULT)
-               return vcpu_set_gr(vcpu, inst.M46.r1, key,0);
-       else return fault;
+               return vcpu_set_gr(vcpu, inst.M46.r1, key, 0);
+       else
+               return fault;
 }
 
 /************************************
  * Insert translation register/cache
 ************************************/
 
-static IA64FAULT priv_itr_d(VCPU *vcpu, INST64 inst)
-{
-       UINT64 fault, itir, ifa, pte, slot;
-
-       //if (!vcpu_get_psr_ic(vcpu)) return(IA64_ILLOP_FAULT);
-       if ((fault = vcpu_get_itir(vcpu,&itir)) != IA64_NO_FAULT)
-               return(IA64_ILLOP_FAULT);
-       if ((fault = vcpu_get_ifa(vcpu,&ifa)) != IA64_NO_FAULT)
-               return(IA64_ILLOP_FAULT);
-       pte = vcpu_get_gr(vcpu,inst.M42.r2);
-       slot = vcpu_get_gr(vcpu,inst.M42.r3);
-
-       return (vcpu_itr_d(vcpu,slot,pte,itir,ifa));
-}
-
-static IA64FAULT priv_itr_i(VCPU *vcpu, INST64 inst)
-{
-       UINT64 fault, itir, ifa, pte, slot;
-
-       //if (!vcpu_get_psr_ic(vcpu)) return(IA64_ILLOP_FAULT);
-       if ((fault = vcpu_get_itir(vcpu,&itir)) != IA64_NO_FAULT)
-               return(IA64_ILLOP_FAULT);
-       if ((fault = vcpu_get_ifa(vcpu,&ifa)) != IA64_NO_FAULT)
-               return(IA64_ILLOP_FAULT);
-       pte = vcpu_get_gr(vcpu,inst.M42.r2);
-       slot = vcpu_get_gr(vcpu,inst.M42.r3);
-
-       return (vcpu_itr_i(vcpu,slot,pte,itir,ifa));
-}
-
-static IA64FAULT priv_itc_d(VCPU *vcpu, INST64 inst)
-{
-       UINT64 fault, itir, ifa, pte;
-
-       //if (!vcpu_get_psr_ic(vcpu)) return(IA64_ILLOP_FAULT);
-       if ((fault = vcpu_get_itir(vcpu,&itir)) != IA64_NO_FAULT)
-               return(IA64_ILLOP_FAULT);
-       if ((fault = vcpu_get_ifa(vcpu,&ifa)) != IA64_NO_FAULT)
-               return(IA64_ILLOP_FAULT);
-       pte = vcpu_get_gr(vcpu,inst.M41.r2);
-
-       return (vcpu_itc_d(vcpu,pte,itir,ifa));
-}
-
-static IA64FAULT priv_itc_i(VCPU *vcpu, INST64 inst)
-{
-       UINT64 fault, itir, ifa, pte;
-
-       //if (!vcpu_get_psr_ic(vcpu)) return(IA64_ILLOP_FAULT);
-       if ((fault = vcpu_get_itir(vcpu,&itir)) != IA64_NO_FAULT)
-               return(IA64_ILLOP_FAULT);
-       if ((fault = vcpu_get_ifa(vcpu,&ifa)) != IA64_NO_FAULT)
-               return(IA64_ILLOP_FAULT);
-       pte = vcpu_get_gr(vcpu,inst.M41.r2);
-
-       return (vcpu_itc_i(vcpu,pte,itir,ifa));
+static IA64FAULT priv_itr_d(VCPU * vcpu, INST64 inst)
+{
+       u64 fault, itir, ifa, pte, slot;
+
+       //if (!vcpu_get_psr_ic(vcpu))
+       //      return IA64_ILLOP_FAULT;
+       if ((fault = vcpu_get_itir(vcpu, &itir)) != IA64_NO_FAULT)
+               return IA64_ILLOP_FAULT;
+       if ((fault = vcpu_get_ifa(vcpu, &ifa)) != IA64_NO_FAULT)
+               return IA64_ILLOP_FAULT;
+       pte = vcpu_get_gr(vcpu, inst.M42.r2);
+       slot = vcpu_get_gr(vcpu, inst.M42.r3);
+
+       return vcpu_itr_d(vcpu, slot, pte, itir, ifa);
+}
+
+static IA64FAULT priv_itr_i(VCPU * vcpu, INST64 inst)
+{
+       u64 fault, itir, ifa, pte, slot;
+
+       //if (!vcpu_get_psr_ic(vcpu)) return IA64_ILLOP_FAULT;
+       if ((fault = vcpu_get_itir(vcpu, &itir)) != IA64_NO_FAULT)
+               return IA64_ILLOP_FAULT;
+       if ((fault = vcpu_get_ifa(vcpu, &ifa)) != IA64_NO_FAULT)
+               return IA64_ILLOP_FAULT;
+       pte = vcpu_get_gr(vcpu, inst.M42.r2);
+       slot = vcpu_get_gr(vcpu, inst.M42.r3);
+
+       return vcpu_itr_i(vcpu, slot, pte, itir, ifa);
+}
+
+static IA64FAULT priv_itc_d(VCPU * vcpu, INST64 inst)
+{
+       u64 fault, itir, ifa, pte;
+
+       //if (!vcpu_get_psr_ic(vcpu)) return IA64_ILLOP_FAULT;
+       if ((fault = vcpu_get_itir(vcpu, &itir)) != IA64_NO_FAULT)
+               return IA64_ILLOP_FAULT;
+       if ((fault = vcpu_get_ifa(vcpu, &ifa)) != IA64_NO_FAULT)
+               return IA64_ILLOP_FAULT;
+       pte = vcpu_get_gr(vcpu, inst.M41.r2);
+
+       return vcpu_itc_d(vcpu, pte, itir, ifa);
+}
+
+static IA64FAULT priv_itc_i(VCPU * vcpu, INST64 inst)
+{
+       u64 fault, itir, ifa, pte;
+
+       //if (!vcpu_get_psr_ic(vcpu)) return IA64_ILLOP_FAULT;
+       if ((fault = vcpu_get_itir(vcpu, &itir)) != IA64_NO_FAULT)
+               return IA64_ILLOP_FAULT;
+       if ((fault = vcpu_get_ifa(vcpu, &ifa)) != IA64_NO_FAULT)
+               return IA64_ILLOP_FAULT;
+       pte = vcpu_get_gr(vcpu, inst.M41.r2);
+
+       return vcpu_itc_i(vcpu, pte, itir, ifa);
 }
 
 /*************************************
  * Moves to semi-privileged registers
 *************************************/
 
-static IA64FAULT priv_mov_to_ar_imm(VCPU *vcpu, INST64 inst)
+static IA64FAULT priv_mov_to_ar_imm(VCPU * vcpu, INST64 inst)
 {
        // I27 and M30 are identical for these fields
-       UINT64 ar3 = inst.M30.ar3;
-       UINT64 imm = vcpu_get_gr(vcpu,inst.M30.imm);
-       return (vcpu_set_ar(vcpu,ar3,imm));
-}
-
-static IA64FAULT priv_mov_to_ar_reg(VCPU *vcpu, INST64 inst)
+       u64 ar3 = inst.M30.ar3;
+       u64 imm = vcpu_get_gr(vcpu, inst.M30.imm);
+       return vcpu_set_ar(vcpu, ar3, imm);
+}
+
+static IA64FAULT priv_mov_to_ar_reg(VCPU * vcpu, INST64 inst)
 {
        // I26 and M29 are identical for these fields
-       UINT64 ar3 = inst.M29.ar3;
+       u64 ar3 = inst.M29.ar3;
 
        if (privify_en && inst.M29.r2 > 63 && inst.M29.ar3 < 8) {
                // privified mov from kr
-               UINT64 val;
-               if (vcpu_get_ar(vcpu,ar3,&val) != IA64_ILLOP_FAULT)
-                       return vcpu_set_gr(vcpu, inst.M29.r2-64, val,0);
-               else return IA64_ILLOP_FAULT;
-       }
-       else {
-               UINT64 r2 = vcpu_get_gr(vcpu,inst.M29.r2);
-               return (vcpu_set_ar(vcpu,ar3,r2));
+               u64 val;
+               if (vcpu_get_ar(vcpu, ar3, &val) != IA64_ILLOP_FAULT)
+                       return vcpu_set_gr(vcpu, inst.M29.r2 - 64, val, 0);
+               else
+                       return IA64_ILLOP_FAULT;
+       } else {
+               u64 r2 = vcpu_get_gr(vcpu, inst.M29.r2);
+               return vcpu_set_ar(vcpu, ar3, r2);
        }
 }
 
@@ -230,177 +235,205 @@ static IA64FAULT priv_mov_to_ar_reg(VCPU
  * Moves to privileged registers
 ********************************/
 
-static IA64FAULT priv_mov_to_pkr(VCPU *vcpu, INST64 inst)
-{
-       UINT64 r3 = vcpu_get_gr(vcpu,inst.M42.r3);
-       UINT64 r2 = vcpu_get_gr(vcpu,inst.M42.r2);
-       return (vcpu_set_pkr(vcpu,r3,r2));
-}
-
-static IA64FAULT priv_mov_to_rr(VCPU *vcpu, INST64 inst)
-{
-       UINT64 r3 = vcpu_get_gr(vcpu,inst.M42.r3);
-       UINT64 r2 = vcpu_get_gr(vcpu,inst.M42.r2);
-       return (vcpu_set_rr(vcpu,r3,r2));
-}
-
-static IA64FAULT priv_mov_to_dbr(VCPU *vcpu, INST64 inst)
-{
-       UINT64 r3 = vcpu_get_gr(vcpu,inst.M42.r3);
-       UINT64 r2 = vcpu_get_gr(vcpu,inst.M42.r2);
-       return (vcpu_set_dbr(vcpu,r3,r2));
-}
-
-static IA64FAULT priv_mov_to_ibr(VCPU *vcpu, INST64 inst)
-{
-       UINT64 r3 = vcpu_get_gr(vcpu,inst.M42.r3);
-       UINT64 r2 = vcpu_get_gr(vcpu,inst.M42.r2);
-       return (vcpu_set_ibr(vcpu,r3,r2));
-}
-
-static IA64FAULT priv_mov_to_pmc(VCPU *vcpu, INST64 inst)
-{
-       UINT64 r3 = vcpu_get_gr(vcpu,inst.M42.r3);
-       UINT64 r2 = vcpu_get_gr(vcpu,inst.M42.r2);
-       return (vcpu_set_pmc(vcpu,r3,r2));
-}
-
-static IA64FAULT priv_mov_to_pmd(VCPU *vcpu, INST64 inst)
-{
-       UINT64 r3 = vcpu_get_gr(vcpu,inst.M42.r3);
-       UINT64 r2 = vcpu_get_gr(vcpu,inst.M42.r2);
-       return (vcpu_set_pmd(vcpu,r3,r2));
-}
-
-static IA64FAULT priv_mov_to_cr(VCPU *vcpu, INST64 inst)
-{
-       UINT64 val = vcpu_get_gr(vcpu, inst.M32.r2);
+static IA64FAULT priv_mov_to_pkr(VCPU * vcpu, INST64 inst)
+{
+       u64 r3 = vcpu_get_gr(vcpu, inst.M42.r3);
+       u64 r2 = vcpu_get_gr(vcpu, inst.M42.r2);
+       return vcpu_set_pkr(vcpu, r3, r2);
+}
+
+static IA64FAULT priv_mov_to_rr(VCPU * vcpu, INST64 inst)
+{
+       u64 r3 = vcpu_get_gr(vcpu, inst.M42.r3);
+       u64 r2 = vcpu_get_gr(vcpu, inst.M42.r2);
+       return vcpu_set_rr(vcpu, r3, r2);
+}
+
+static IA64FAULT priv_mov_to_dbr(VCPU * vcpu, INST64 inst)
+{
+       u64 r3 = vcpu_get_gr(vcpu, inst.M42.r3);
+       u64 r2 = vcpu_get_gr(vcpu, inst.M42.r2);
+       return vcpu_set_dbr(vcpu, r3, r2);
+}
+
+static IA64FAULT priv_mov_to_ibr(VCPU * vcpu, INST64 inst)
+{
+       u64 r3 = vcpu_get_gr(vcpu, inst.M42.r3);
+       u64 r2 = vcpu_get_gr(vcpu, inst.M42.r2);
+       return vcpu_set_ibr(vcpu, r3, r2);
+}
+
+static IA64FAULT priv_mov_to_pmc(VCPU * vcpu, INST64 inst)
+{
+       u64 r3 = vcpu_get_gr(vcpu, inst.M42.r3);
+       u64 r2 = vcpu_get_gr(vcpu, inst.M42.r2);
+       return vcpu_set_pmc(vcpu, r3, r2);
+}
+
+static IA64FAULT priv_mov_to_pmd(VCPU * vcpu, INST64 inst)
+{
+       u64 r3 = vcpu_get_gr(vcpu, inst.M42.r3);
+       u64 r2 = vcpu_get_gr(vcpu, inst.M42.r2);
+       return vcpu_set_pmd(vcpu, r3, r2);
+}
+
+static IA64FAULT priv_mov_to_cr(VCPU * vcpu, INST64 inst)
+{
+       u64 val = vcpu_get_gr(vcpu, inst.M32.r2);
        perfc_incra(mov_to_cr, inst.M32.cr3);
        switch (inst.M32.cr3) {
-           case 0: return vcpu_set_dcr(vcpu,val);
-           case 1: return vcpu_set_itm(vcpu,val);
-           case 2: return vcpu_set_iva(vcpu,val);
-           case 8: return vcpu_set_pta(vcpu,val);
-           case 16:return vcpu_set_ipsr(vcpu,val);
-           case 17:return vcpu_set_isr(vcpu,val);
-           case 19:return vcpu_set_iip(vcpu,val);
-           case 20:return vcpu_set_ifa(vcpu,val);
-           case 21:return vcpu_set_itir(vcpu,val);
-           case 22:return vcpu_set_iipa(vcpu,val);
-           case 23:return vcpu_set_ifs(vcpu,val);
-           case 24:return vcpu_set_iim(vcpu,val);
-           case 25:return vcpu_set_iha(vcpu,val);
-           case 64:return vcpu_set_lid(vcpu,val);
-           case 65:return IA64_ILLOP_FAULT;
-           case 66:return vcpu_set_tpr(vcpu,val);
-           case 67:return vcpu_set_eoi(vcpu,val);
-           case 68:return IA64_ILLOP_FAULT;
-           case 69:return IA64_ILLOP_FAULT;
-           case 70:return IA64_ILLOP_FAULT;
-           case 71:return IA64_ILLOP_FAULT;
-           case 72:return vcpu_set_itv(vcpu,val);
-           case 73:return vcpu_set_pmv(vcpu,val);
-           case 74:return vcpu_set_cmcv(vcpu,val);
-           case 80:return vcpu_set_lrr0(vcpu,val);
-           case 81:return vcpu_set_lrr1(vcpu,val);
-           default: return IA64_ILLOP_FAULT;
-       }
-}
-
-static IA64FAULT priv_rsm(VCPU *vcpu, INST64 inst)
-{
-       UINT64 imm24 = (inst.M44.i<<23)|(inst.M44.i2<<21)|inst.M44.imm;
-       return vcpu_reset_psr_sm(vcpu,imm24);
-}
-
-static IA64FAULT priv_ssm(VCPU *vcpu, INST64 inst)
-{
-       UINT64 imm24 = (inst.M44.i<<23)|(inst.M44.i2<<21)|inst.M44.imm;
-       return vcpu_set_psr_sm(vcpu,imm24);
+       case 0:
+               return vcpu_set_dcr(vcpu, val);
+       case 1:
+               return vcpu_set_itm(vcpu, val);
+       case 2:
+               return vcpu_set_iva(vcpu, val);
+       case 8:
+               return vcpu_set_pta(vcpu, val);
+       case 16:
+               return vcpu_set_ipsr(vcpu, val);
+       case 17:
+               return vcpu_set_isr(vcpu, val);
+       case 19:
+               return vcpu_set_iip(vcpu, val);
+       case 20:
+               return vcpu_set_ifa(vcpu, val);
+       case 21:
+               return vcpu_set_itir(vcpu, val);
+       case 22:
+               return vcpu_set_iipa(vcpu, val);
+       case 23:
+               return vcpu_set_ifs(vcpu, val);
+       case 24:
+               return vcpu_set_iim(vcpu, val);
+       case 25:
+               return vcpu_set_iha(vcpu, val);
+       case 64:
+               return vcpu_set_lid(vcpu, val);
+       case 65:
+               return IA64_ILLOP_FAULT;
+       case 66:
+               return vcpu_set_tpr(vcpu, val);
+       case 67:
+               return vcpu_set_eoi(vcpu, val);
+       case 68:
+               return IA64_ILLOP_FAULT;
+       case 69:
+               return IA64_ILLOP_FAULT;
+       case 70:
+               return IA64_ILLOP_FAULT;
+       case 71:
+               return IA64_ILLOP_FAULT;
+       case 72:
+               return vcpu_set_itv(vcpu, val);
+       case 73:
+               return vcpu_set_pmv(vcpu, val);
+       case 74:
+               return vcpu_set_cmcv(vcpu, val);
+       case 80:
+               return vcpu_set_lrr0(vcpu, val);
+       case 81:
+               return vcpu_set_lrr1(vcpu, val);
+       default:
+               return IA64_ILLOP_FAULT;
+       }
+}
+
+static IA64FAULT priv_rsm(VCPU * vcpu, INST64 inst)
+{
+       u64 imm24 = (inst.M44.i << 23) | (inst.M44.i2 << 21) | inst.M44.imm;
+       return vcpu_reset_psr_sm(vcpu, imm24);
+}
+
+static IA64FAULT priv_ssm(VCPU * vcpu, INST64 inst)
+{
+       u64 imm24 = (inst.M44.i << 23) | (inst.M44.i2 << 21) | inst.M44.imm;
+       return vcpu_set_psr_sm(vcpu, imm24);
 }
 
 /**
  * @todo Check for reserved bits and return IA64_RSVDREG_FAULT.
  */
-static IA64FAULT priv_mov_to_psr(VCPU *vcpu, INST64 inst)
-{
-       UINT64 val = vcpu_get_gr(vcpu, inst.M35.r2);
-       return vcpu_set_psr_l(vcpu,val);
+static IA64FAULT priv_mov_to_psr(VCPU * vcpu, INST64 inst)
+{
+       u64 val = vcpu_get_gr(vcpu, inst.M35.r2);
+       return vcpu_set_psr_l(vcpu, val);
 }
 
 /**********************************
  * Moves from privileged registers
  **********************************/
 
-static IA64FAULT priv_mov_from_rr(VCPU *vcpu, INST64 inst)
-{
-       UINT64 val;
+static IA64FAULT priv_mov_from_rr(VCPU * vcpu, INST64 inst)
+{
+       u64 val;
        IA64FAULT fault;
-       UINT64 reg;
-       
-       reg = vcpu_get_gr(vcpu,inst.M43.r3);
+       u64 reg;
+
+       reg = vcpu_get_gr(vcpu, inst.M43.r3);
        if (privify_en && inst.M43.r1 > 63) {
                // privified mov from cpuid
-               fault = vcpu_get_cpuid(vcpu,reg,&val);
+               fault = vcpu_get_cpuid(vcpu, reg, &val);
                if (fault == IA64_NO_FAULT)
-                       return vcpu_set_gr(vcpu, inst.M43.r1-64, val, 0);
-       }
-       else {
-               fault = vcpu_get_rr(vcpu,reg,&val);
+                       return vcpu_set_gr(vcpu, inst.M43.r1 - 64, val, 0);
+       } else {
+               fault = vcpu_get_rr(vcpu, reg, &val);
                if (fault == IA64_NO_FAULT)
                        return vcpu_set_gr(vcpu, inst.M43.r1, val, 0);
        }
        return fault;
 }
 
-static IA64FAULT priv_mov_from_pkr(VCPU *vcpu, INST64 inst)
-{
-       UINT64 val;
+static IA64FAULT priv_mov_from_pkr(VCPU * vcpu, INST64 inst)
+{
+       u64 val;
        IA64FAULT fault;
-       
-       fault = vcpu_get_pkr(vcpu,vcpu_get_gr(vcpu,inst.M43.r3),&val);
+
+       fault = vcpu_get_pkr(vcpu, vcpu_get_gr(vcpu, inst.M43.r3), &val);
        if (fault == IA64_NO_FAULT)
                return vcpu_set_gr(vcpu, inst.M43.r1, val, 0);
-       else return fault;
-}
-
-static IA64FAULT priv_mov_from_dbr(VCPU *vcpu, INST64 inst)
-{
-       UINT64 val;
+       else
+               return fault;
+}
+
+static IA64FAULT priv_mov_from_dbr(VCPU * vcpu, INST64 inst)
+{
+       u64 val;
        IA64FAULT fault;
-       
-       fault = vcpu_get_dbr(vcpu,vcpu_get_gr(vcpu,inst.M43.r3),&val);
+
+       fault = vcpu_get_dbr(vcpu, vcpu_get_gr(vcpu, inst.M43.r3), &val);
        if (fault == IA64_NO_FAULT)
                return vcpu_set_gr(vcpu, inst.M43.r1, val, 0);
-       else return fault;
-}
-
-static IA64FAULT priv_mov_from_ibr(VCPU *vcpu, INST64 inst)
-{
-       UINT64 val;
+       else
+               return fault;
+}
+
+static IA64FAULT priv_mov_from_ibr(VCPU * vcpu, INST64 inst)
+{
+       u64 val;
        IA64FAULT fault;
-       
-       fault = vcpu_get_ibr(vcpu,vcpu_get_gr(vcpu,inst.M43.r3),&val);
+
+       fault = vcpu_get_ibr(vcpu, vcpu_get_gr(vcpu, inst.M43.r3), &val);
        if (fault == IA64_NO_FAULT)
                return vcpu_set_gr(vcpu, inst.M43.r1, val, 0);
-       else return fault;
-}
-
-static IA64FAULT priv_mov_from_pmc(VCPU *vcpu, INST64 inst)
-{
-       UINT64 val;
+       else
+               return fault;
+}
+
+static IA64FAULT priv_mov_from_pmc(VCPU * vcpu, INST64 inst)
+{
+       u64 val;
        IA64FAULT fault;
-       UINT64 reg;
-       
-       reg = vcpu_get_gr(vcpu,inst.M43.r3);
+       u64 reg;
+
+       reg = vcpu_get_gr(vcpu, inst.M43.r3);
        if (privify_en && inst.M43.r1 > 63) {
                // privified mov from pmd
-               fault = vcpu_get_pmd(vcpu,reg,&val);
+               fault = vcpu_get_pmd(vcpu, reg, &val);
                if (fault == IA64_NO_FAULT)
-                       return vcpu_set_gr(vcpu, inst.M43.r1-64, val, 0);
-       }
-       else {
-               fault = vcpu_get_pmc(vcpu,reg,&val);
+                       return vcpu_set_gr(vcpu, inst.M43.r1 - 64, val, 0);
+       } else {
+               fault = vcpu_get_pmc(vcpu, reg, &val);
                if (fault == IA64_NO_FAULT)
                        return vcpu_set_gr(vcpu, inst.M43.r1, val, 0);
        }
@@ -410,55 +443,83 @@ static IA64FAULT priv_mov_from_pmc(VCPU 
 #define cr_get(cr) \
        ((fault = vcpu_get_##cr(vcpu,&val)) == IA64_NO_FAULT) ? \
                vcpu_set_gr(vcpu, tgt, val, 0) : fault;
-       
-static IA64FAULT priv_mov_from_cr(VCPU *vcpu, INST64 inst)
-{
-       UINT64 tgt = inst.M33.r1;
-       UINT64 val;
+
+static IA64FAULT priv_mov_from_cr(VCPU * vcpu, INST64 inst)
+{
+       u64 tgt = inst.M33.r1;
+       u64 val;
        IA64FAULT fault;
 
        perfc_incra(mov_from_cr, inst.M33.cr3);
        switch (inst.M33.cr3) {
-           case 0: return cr_get(dcr);
-           case 1: return cr_get(itm);
-           case 2: return cr_get(iva);
-           case 8: return cr_get(pta);
-           case 16:return cr_get(ipsr);
-           case 17:return cr_get(isr);
-           case 19:return cr_get(iip);
-           case 20:return cr_get(ifa);
-           case 21:return cr_get(itir);
-           case 22:return cr_get(iipa);
-           case 23:return cr_get(ifs);
-           case 24:return cr_get(iim);
-           case 25:return cr_get(iha);
-           case 64:return cr_get(lid);
-           case 65:return cr_get(ivr);
-           case 66:return cr_get(tpr);
-           case 67:return vcpu_set_gr(vcpu,tgt,0L,0);
-           case 68:return cr_get(irr0);
-           case 69:return cr_get(irr1);
-           case 70:return cr_get(irr2);
-           case 71:return cr_get(irr3);
-           case 72:return cr_get(itv);
-           case 73:return cr_get(pmv);
-           case 74:return cr_get(cmcv);
-           case 80:return cr_get(lrr0);
-           case 81:return cr_get(lrr1);
-           default: return IA64_ILLOP_FAULT;
+       case 0:
+               return cr_get(dcr);
+       case 1:
+               return cr_get(itm);
+       case 2:
+               return cr_get(iva);
+       case 8:
+               return cr_get(pta);
+       case 16:
+               return cr_get(ipsr);
+       case 17:
+               return cr_get(isr);
+       case 19:
+               return cr_get(iip);
+       case 20:
+               return cr_get(ifa);
+       case 21:
+               return cr_get(itir);
+       case 22:
+               return cr_get(iipa);
+       case 23:
+               return cr_get(ifs);
+       case 24:
+               return cr_get(iim);
+       case 25:
+               return cr_get(iha);
+       case 64:
+               return cr_get(lid);
+       case 65:
+               return cr_get(ivr);
+       case 66:
+               return cr_get(tpr);
+       case 67:
+               return vcpu_set_gr(vcpu, tgt, 0L, 0);
+       case 68:
+               return cr_get(irr0);
+       case 69:
+               return cr_get(irr1);
+       case 70:
+               return cr_get(irr2);
+       case 71:
+               return cr_get(irr3);
+       case 72:
+               return cr_get(itv);
+       case 73:
+               return cr_get(pmv);
+       case 74:
+               return cr_get(cmcv);
+       case 80:
+               return cr_get(lrr0);
+       case 81:
+               return cr_get(lrr1);
+       default:
+               return IA64_ILLOP_FAULT;
        }
        return IA64_ILLOP_FAULT;
 }
 
-static IA64FAULT priv_mov_from_psr(VCPU *vcpu, INST64 inst)
-{
-       UINT64 tgt = inst.M33.r1;
-       UINT64 val;
+static IA64FAULT priv_mov_from_psr(VCPU * vcpu, INST64 inst)
+{
+       u64 tgt = inst.M33.r1;
+       u64 val;
        IA64FAULT fault;
 
-       if ((fault = vcpu_get_psr(vcpu,&val)) == IA64_NO_FAULT)
+       if ((fault = vcpu_get_psr(vcpu, &val)) == IA64_NO_FAULT)
                return vcpu_set_gr(vcpu, tgt, val, 0);
-       else return fault;
+       else
+               return fault;
 }
 
 /**************************************************************************
@@ -483,28 +544,28 @@ static const IA64_SLOT_TYPE slot_types[0
 };
 
 // pointer to privileged emulation function
-typedef IA64FAULT (*PPEFCN)(VCPU *vcpu, INST64 inst);
+typedef IA64FAULT(*PPEFCN) (VCPU * vcpu, INST64 inst);
 
 static const PPEFCN Mpriv_funcs[64] = {
-  priv_mov_to_rr, priv_mov_to_dbr, priv_mov_to_ibr, priv_mov_to_pkr,
-  priv_mov_to_pmc, priv_mov_to_pmd, 0, 0,
-  0, priv_ptc_l, priv_ptc_g, priv_ptc_ga,
-  priv_ptr_d, priv_ptr_i, priv_itr_d, priv_itr_i,
-  priv_mov_from_rr, priv_mov_from_dbr, priv_mov_from_ibr, priv_mov_from_pkr,
-  priv_mov_from_pmc, 0, 0, 0,
-  0, 0, 0, 0,
-  0, 0, priv_tpa, priv_tak,
-  0, 0, 0, 0,
-  priv_mov_from_cr, priv_mov_from_psr, 0, 0,
-  0, 0, 0, 0,
-  priv_mov_to_cr, priv_mov_to_psr, priv_itc_d, priv_itc_i,
-  0, 0, 0, 0,
-  priv_ptc_e, 0, 0, 0,
-  0, 0, 0, 0, 0, 0, 0, 0
+       priv_mov_to_rr, priv_mov_to_dbr, priv_mov_to_ibr, priv_mov_to_pkr,
+       priv_mov_to_pmc, priv_mov_to_pmd, 0, 0,
+       0, priv_ptc_l, priv_ptc_g, priv_ptc_ga,
+       priv_ptr_d, priv_ptr_i, priv_itr_d, priv_itr_i,
+       priv_mov_from_rr, priv_mov_from_dbr, priv_mov_from_ibr,
+       priv_mov_from_pkr,
+       priv_mov_from_pmc, 0, 0, 0,
+       0, 0, 0, 0,
+       0, 0, priv_tpa, priv_tak,
+       0, 0, 0, 0,
+       priv_mov_from_cr, priv_mov_from_psr, 0, 0,
+       0, 0, 0, 0,
+       priv_mov_to_cr, priv_mov_to_psr, priv_itc_d, priv_itc_i,
+       0, 0, 0, 0,
+       priv_ptc_e, 0, 0, 0,
+       0, 0, 0, 0, 0, 0, 0, 0
 };
 
-static IA64FAULT
-priv_handle_op(VCPU *vcpu, REGS *regs, int privlvl)
+static IA64FAULT priv_handle_op(VCPU * vcpu, REGS * regs, int privlvl)
 {
        IA64_BUNDLE bundle;
        int slot;
@@ -512,85 +573,97 @@ priv_handle_op(VCPU *vcpu, REGS *regs, i
        INST64 inst;
        PPEFCN pfunc;
        unsigned long ipsr = regs->cr_ipsr;
-       UINT64 iip = regs->cr_iip;
+       u64 iip = regs->cr_iip;
        int x6;
-       
+
        // make a local copy of the bundle containing the privop
        if (!vcpu_get_domain_bundle(vcpu, regs, iip, &bundle)) {
                //return vcpu_force_data_miss(vcpu, regs->cr_iip);
                return vcpu_force_inst_miss(vcpu, regs->cr_iip);
        }
-
 #if 0
-       if (iip==0xa000000100001820) {
+       if (iip == 0xa000000100001820) {
                static int firstpagefault = 1;
                if (firstpagefault) {
-                       printf("*** First time to domain page fault!\n");                       firstpagefault=0;
+                       printf("*** First time to domain page fault!\n");
+                       firstpagefault = 0;
                }
        }
 #endif
        if (privop_trace) {
                static long i = 400;
                //if (i > 0) printf("priv_handle_op: at 0x%lx\n",iip);
-               if (i > 0) printf("priv_handle_op: privop trace at 0x%lx, itc=%lx, itm=%lx\n",
-                       iip,ia64_get_itc(),ia64_get_itm());
+               if (i > 0)
+                       printf("priv_handle_op: privop trace at 0x%lx, "
+                              "itc=%lx, itm=%lx\n",
+                              iip, ia64_get_itc(), ia64_get_itm());
                i--;
        }
        slot = ((struct ia64_psr *)&ipsr)->ri;
-       if (!slot) inst.inst = (bundle.i64[0]>>5) & MASK_41;
+       if (!slot)
+               inst.inst = (bundle.i64[0] >> 5) & MASK_41;
        else if (slot == 1)
-               inst.inst = ((bundle.i64[0]>>46) | bundle.i64[1]<<18) & MASK_41;
-       else if (slot == 2) inst.inst = (bundle.i64[1]>>23) & MASK_41; 
-       else printf("priv_handle_op: illegal slot: %d\n", slot);
+               inst.inst =
+                   ((bundle.i64[0] >> 46) | bundle.i64[1] << 18) & MASK_41;
+       else if (slot == 2)
+               inst.inst = (bundle.i64[1] >> 23) & MASK_41;
+       else
+               printf("priv_handle_op: illegal slot: %d\n", slot);
 
        slot_type = slot_types[bundle.template][slot];
        if (priv_verbose) {
-               printf("priv_handle_op: checking bundle at 0x%lx (op=0x%016lx) slot %d (type=%d)\n",
-                iip, (UINT64)inst.inst, slot, slot_type);
+               printf("priv_handle_op: checking bundle at 0x%lx "
+                      "(op=0x%016lx) slot %d (type=%d)\n",
+                      iip, (u64) inst.inst, slot, slot_type);
        }
        if (slot_type == B && inst.generic.major == 0 && inst.B8.x6 == 0x0) {
                // break instr for privified cover
-       }
-       else if (privlvl != 2) return (IA64_ILLOP_FAULT);
+       } else if (privlvl != 2)
+               return IA64_ILLOP_FAULT;
        switch (slot_type) {
-           case M:
+       case M:
                if (inst.generic.major == 0) {
 #if 0
                        if (inst.M29.x6 == 0 && inst.M29.x3 == 0) {
                                privcnt.cover++;
-                               return priv_cover(vcpu,inst);
+                               return priv_cover(vcpu, inst);
                        }
 #endif
-                       if (inst.M29.x3 != 0) break;
+                       if (inst.M29.x3 != 0)
+                               break;
                        if (inst.M30.x4 == 8 && inst.M30.x2 == 2) {
                                perfc_incrc(mov_to_ar_imm);
-                               return priv_mov_to_ar_imm(vcpu,inst);
+                               return priv_mov_to_ar_imm(vcpu, inst);
                        }
                        if (inst.M44.x4 == 6) {
                                perfc_incrc(ssm);
-                               return priv_ssm(vcpu,inst);
+                               return priv_ssm(vcpu, inst);
                        }
                        if (inst.M44.x4 == 7) {
                                perfc_incrc(rsm);
-                               return priv_rsm(vcpu,inst);
+                               return priv_rsm(vcpu, inst);
                        }
                        break;
-               }
-               else if (inst.generic.major != 1) break;
+               } else if (inst.generic.major != 1)
+                       break;
                x6 = inst.M29.x6;
                if (x6 == 0x2a) {
                        if (privify_en && inst.M29.r2 > 63 && inst.M29.ar3 < 8)
                                perfc_incrc(mov_from_ar); // privified mov from kr
                        else
                                perfc_incrc(mov_to_ar_reg);
-                       return priv_mov_to_ar_reg(vcpu,inst);
-               }
-               if (inst.M29.x3 != 0) break;
-               if (!(pfunc = Mpriv_funcs[x6])) break;
-               if (x6 == 0x1e || x6 == 0x1f)  { // tpa or tak are "special"
+                       return priv_mov_to_ar_reg(vcpu, inst);
+               }
+               if (inst.M29.x3 != 0)
+                       break;
+               if (!(pfunc = Mpriv_funcs[x6]))
+                       break;
+               if (x6 == 0x1e || x6 == 0x1f) { // tpa or tak are "special"
                        if (privify_en && inst.M46.r3 > 63) {
-                               if (x6 == 0x1e) x6 = 0x1b;
-                               else x6 = 0x1a;
+                               if (x6 == 0x1e)
+                                       x6 = 0x1b;
+                               else
+                                       x6 = 0x1a;
                        }
                }
                if (privify_en && x6 == 52 && inst.M28.r3 > 63)
@@ -599,61 +672,66 @@ priv_handle_op(VCPU *vcpu, REGS *regs, i
                        perfc_incrc(cpuid);
                else
                        perfc_incra(misc_privop, x6);
-               return (*pfunc)(vcpu,inst);
+               return (*pfunc) (vcpu, inst);
                break;
-           case B:
-               if (inst.generic.major != 0) break;
+       case B:
+               if (inst.generic.major != 0)
+                       break;
                if (inst.B8.x6 == 0x08) {
                        IA64FAULT fault;
                        perfc_incrc(rfi);
-                       fault = priv_rfi(vcpu,inst);
-                       if (fault == IA64_NO_FAULT) fault = IA64_RFI_IN_PROGRESS;
+                       fault = priv_rfi(vcpu, inst);
+                       if (fault == IA64_NO_FAULT)
+                               fault = IA64_RFI_IN_PROGRESS;
                        return fault;
                }
                if (inst.B8.x6 == 0x0c) {
                        perfc_incrc(bsw0);
-                       return priv_bsw0(vcpu,inst);
+                       return priv_bsw0(vcpu, inst);
                }
                if (inst.B8.x6 == 0x0d) {
                        perfc_incrc(bsw1);
-                       return priv_bsw1(vcpu,inst);
+                       return priv_bsw1(vcpu, inst);
                }
                if (inst.B8.x6 == 0x0) {
                        // break instr for privified cover
                        perfc_incrc(cover);
-                       return priv_cover(vcpu,inst);
+                       return priv_cover(vcpu, inst);
                }
                break;
-           case I:
-               if (inst.generic.major != 0) break;
+       case I:
+               if (inst.generic.major != 0)
+                       break;
 #if 0
                if (inst.I26.x6 == 0 && inst.I26.x3 == 0) {
                        perfc_incrc(cover);
-                       return priv_cover(vcpu,inst);
+                       return priv_cover(vcpu, inst);
                }
 #endif
-               if (inst.I26.x3 != 0) break;  // I26.x3 == I27.x3
+               if (inst.I26.x3 != 0)
+                       break;  // I26.x3 == I27.x3
                if (inst.I26.x6 == 0x2a) {
                        if (privify_en && inst.I26.r2 > 63 && inst.I26.ar3 < 8)
-                               perfc_incrc(mov_from_ar); // privified mov from kr
-                       else 
+                               perfc_incrc(mov_from_ar);       // privified mov from kr
+                       else
                                perfc_incrc(mov_to_ar_reg);
-                       return priv_mov_to_ar_reg(vcpu,inst);
+                       return priv_mov_to_ar_reg(vcpu, inst);
                }
                if (inst.I27.x6 == 0x0a) {
                        perfc_incrc(mov_to_ar_imm);
-                       return priv_mov_to_ar_imm(vcpu,inst);
+                       return priv_mov_to_ar_imm(vcpu, inst);
                }
                break;
-           default:
+       default:
                break;
        }
-        //printf("We who are about do die salute you\n");
-       printf("priv_handle_op: can't handle privop at 0x%lx (op=0x%016lx) slot %d (type=%d), ipsr=0x%lx\n",
-                iip, (UINT64)inst.inst, slot, slot_type, ipsr);
-        //printf("vtop(0x%lx)==0x%lx\n", iip, tr_vtop(iip));
-        //thread_mozambique("privop fault\n");
-       return (IA64_ILLOP_FAULT);
+       //printf("We who are about do die salute you\n");
+       printf("priv_handle_op: can't handle privop at 0x%lx (op=0x%016lx) "
+              "slot %d (type=%d), ipsr=0x%lx\n",
+              iip, (u64) inst.inst, slot, slot_type, ipsr);
+       //printf("vtop(0x%lx)==0x%lx\n", iip, tr_vtop(iip));
+       //thread_mozambique("privop fault\n");
+       return IA64_ILLOP_FAULT;
 }
 
 /** Emulate a privileged operation.
@@ -666,142 +744,139 @@ priv_handle_op(VCPU *vcpu, REGS *regs, i
  * @param isrcode interrupt service routine code
  * @return fault
  */
-IA64FAULT
-priv_emulate(VCPU *vcpu, REGS *regs, UINT64 isr)
+IA64FAULT priv_emulate(VCPU * vcpu, REGS * regs, u64 isr)
 {
        IA64FAULT fault;
-       UINT64 ipsr = regs->cr_ipsr;
-       UINT64 isrcode = (isr >> 4) & 0xf;
+       u64 ipsr = regs->cr_ipsr;
+       u64 isrcode = (isr >> 4) & 0xf;
        int privlvl;
 
        // handle privops masked as illops? and breaks (6)
        if (isrcode != 1 && isrcode != 2 && isrcode != 0 && isrcode != 6) {
-               printf("priv_emulate: isrcode != 0 or 1 or 2\n");
+               printf("priv_emulate: isrcode != 0 or 1 or 2\n");
                printf("priv_emulate: returning ILLOP, not implemented!\n");
-               while (1);
+               while (1) ;
                return IA64_ILLOP_FAULT;
        }
        //if (isrcode != 1 && isrcode != 2) return 0;
        privlvl = ia64_get_cpl(ipsr);
        // its OK for a privified-cover to be executed in user-land
-       fault = priv_handle_op(vcpu,regs,privlvl);
-       if ((fault == IA64_NO_FAULT) || (fault == IA64_EXTINT_VECTOR)) { // 
success!!
+       fault = priv_handle_op(vcpu, regs, privlvl);
+       if ((fault == IA64_NO_FAULT) || (fault == IA64_EXTINT_VECTOR)) {
+               // success!!
                // update iip/ipsr to point to the next instruction
                (void)vcpu_increment_iip(vcpu);
        }
        if (fault == IA64_ILLOP_FAULT)
                printf("priv_emulate: priv_handle_op fails, "
-                      "isr=0x%lx iip=%lx\n",isr, regs->cr_iip);
+                      "isr=0x%lx iip=%lx\n", isr, regs->cr_iip);
        return fault;
 }
 
 /* hyperprivops are generally executed in assembly (with physical psr.ic off)
  * so this code is primarily used for debugging them */
-int
-ia64_hyperprivop(unsigned long iim, REGS *regs)
+int ia64_hyperprivop(unsigned long iim, REGS * regs)
 {
        struct vcpu *v = current;
-       UINT64 val;
-       UINT64 itir, ifa;
+       u64 val;
+       u64 itir, ifa;
 
        if (!iim || iim > HYPERPRIVOP_MAX) {
                panic_domain(regs, "bad hyperprivop: iim=%lx, iip=0x%lx\n",
-                            iim, regs->cr_iip);
+                            iim, regs->cr_iip);
                return 1;
        }
        perfc_incra(slow_hyperprivop, iim);
-       switch(iim) {
-           case HYPERPRIVOP_RFI:
-               (void)vcpu_rfi(v);
+       switch (iim) {
+       case HYPERPRIVOP_RFI:
+               vcpu_rfi(v);
                return 0;       // don't update iip
-           case HYPERPRIVOP_RSM_DT:
-               (void)vcpu_reset_psr_dt(v);
-               return 1;
-           case HYPERPRIVOP_SSM_DT:
-               (void)vcpu_set_psr_dt(v);
-               return 1;
-           case HYPERPRIVOP_COVER:
-               (void)vcpu_cover(v);
-               return 1;
-           case HYPERPRIVOP_ITC_D:
-               (void)vcpu_get_itir(v,&itir);
-               (void)vcpu_get_ifa(v,&ifa);
-               (void)vcpu_itc_d(v,regs->r8,itir,ifa);
-               return 1;
-           case HYPERPRIVOP_ITC_I:
-               (void)vcpu_get_itir(v,&itir);
-               (void)vcpu_get_ifa(v,&ifa);
-               (void)vcpu_itc_i(v,regs->r8,itir,ifa);
-               return 1;
-           case HYPERPRIVOP_SSM_I:
-               (void)vcpu_set_psr_i(v);
-               return 1;
-           case HYPERPRIVOP_GET_IVR:
-               (void)vcpu_get_ivr(v,&val);
+       case HYPERPRIVOP_RSM_DT:
+               vcpu_reset_psr_dt(v);
+               return 1;
+       case HYPERPRIVOP_SSM_DT:
+               vcpu_set_psr_dt(v);
+               return 1;
+       case HYPERPRIVOP_COVER:
+               vcpu_cover(v);
+               return 1;
+       case HYPERPRIVOP_ITC_D:
+               vcpu_get_itir(v, &itir);
+               vcpu_get_ifa(v, &ifa);
+               vcpu_itc_d(v, regs->r8, itir, ifa);
+               return 1;
+       case HYPERPRIVOP_ITC_I:
+               vcpu_get_itir(v, &itir);
+               vcpu_get_ifa(v, &ifa);
+               vcpu_itc_i(v, regs->r8, itir, ifa);
+               return 1;
+       case HYPERPRIVOP_SSM_I:
+               vcpu_set_psr_i(v);
+               return 1;
+       case HYPERPRIVOP_GET_IVR:
+               vcpu_get_ivr(v, &val);
                regs->r8 = val;
                return 1;
-           case HYPERPRIVOP_GET_TPR:
-               (void)vcpu_get_tpr(v,&val);
+       case HYPERPRIVOP_GET_TPR:
+               vcpu_get_tpr(v, &val);
                regs->r8 = val;
                return 1;
-           case HYPERPRIVOP_SET_TPR:
-               (void)vcpu_set_tpr(v,regs->r8);
-               return 1;
-           case HYPERPRIVOP_EOI:
-               (void)vcpu_set_eoi(v,0L);
-               return 1;
-           case HYPERPRIVOP_SET_ITM:
-               (void)vcpu_set_itm(v,regs->r8);
-               return 1;
-           case HYPERPRIVOP_THASH:
-               (void)vcpu_thash(v,regs->r8,&val);
+       case HYPERPRIVOP_SET_TPR:
+               vcpu_set_tpr(v, regs->r8);
+               return 1;
+       case HYPERPRIVOP_EOI:
+               vcpu_set_eoi(v, 0L);
+               return 1;
+       case HYPERPRIVOP_SET_ITM:
+               vcpu_set_itm(v, regs->r8);
+               return 1;
+       case HYPERPRIVOP_THASH:
+               vcpu_thash(v, regs->r8, &val);
                regs->r8 = val;
                return 1;
-           case HYPERPRIVOP_PTC_GA:
-               (void)vcpu_ptc_ga(v,regs->r8,(1L << ((regs->r9 & 0xfc) >> 2)));
-               return 1;
-           case HYPERPRIVOP_ITR_D:
-               (void)vcpu_get_itir(v,&itir);
-               (void)vcpu_get_ifa(v,&ifa);
-               (void)vcpu_itr_d(v,regs->r8,regs->r9,itir,ifa);
-               return 1;
-           case HYPERPRIVOP_GET_RR:
-               (void)vcpu_get_rr(v,regs->r8,&val);
+       case HYPERPRIVOP_PTC_GA:
+               vcpu_ptc_ga(v, regs->r8, (1L << ((regs->r9 & 0xfc) >> 2)));
+               return 1;
+       case HYPERPRIVOP_ITR_D:
+               vcpu_get_itir(v, &itir);
+               vcpu_get_ifa(v, &ifa);
+               vcpu_itr_d(v, regs->r8, regs->r9, itir, ifa);
+               return 1;
+       case HYPERPRIVOP_GET_RR:
+               vcpu_get_rr(v, regs->r8, &val);
                regs->r8 = val;
                return 1;
-           case HYPERPRIVOP_SET_RR:
-               (void)vcpu_set_rr(v,regs->r8,regs->r9);
-               return 1;
-           case HYPERPRIVOP_SET_KR:
-               (void)vcpu_set_ar(v,regs->r8,regs->r9);
-               return 1;
-           case HYPERPRIVOP_FC:
-               (void)vcpu_fc(v,regs->r8);
-               return 1;
-           case HYPERPRIVOP_GET_CPUID:
-               (void)vcpu_get_cpuid(v,regs->r8,&val);
+       case HYPERPRIVOP_SET_RR:
+               vcpu_set_rr(v, regs->r8, regs->r9);
+               return 1;
+       case HYPERPRIVOP_SET_KR:
+               vcpu_set_ar(v, regs->r8, regs->r9);
+               return 1;
+       case HYPERPRIVOP_FC:
+               vcpu_fc(v, regs->r8);
+               return 1;
+       case HYPERPRIVOP_GET_CPUID:
+               vcpu_get_cpuid(v, regs->r8, &val);
                regs->r8 = val;
                return 1;
-           case HYPERPRIVOP_GET_PMD:
-               (void)vcpu_get_pmd(v,regs->r8,&val);
+       case HYPERPRIVOP_GET_PMD:
+               vcpu_get_pmd(v, regs->r8, &val);
                regs->r8 = val;
                return 1;
-           case HYPERPRIVOP_GET_EFLAG:
-               (void)vcpu_get_ar(v,24,&val);
+       case HYPERPRIVOP_GET_EFLAG:
+               vcpu_get_ar(v, 24, &val);
                regs->r8 = val;
                return 1;
-           case HYPERPRIVOP_SET_EFLAG:
-               (void)vcpu_set_ar(v,24,regs->r8);
-               return 1;
-           case HYPERPRIVOP_RSM_BE:
-               (void)vcpu_reset_psr_sm(v, IA64_PSR_BE);
-               return 1;
-           case HYPERPRIVOP_GET_PSR:
-               (void)vcpu_get_psr(v, &val);
+       case HYPERPRIVOP_SET_EFLAG:
+               vcpu_set_ar(v, 24, regs->r8);
+               return 1;
+       case HYPERPRIVOP_RSM_BE:
+               vcpu_reset_psr_sm(v, IA64_PSR_BE);
+               return 1;
+       case HYPERPRIVOP_GET_PSR:
+               vcpu_get_psr(v, &val);
                regs->r8 = val;
                return 1;
        }
        return 0;
 }
-
-
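
Note the return convention the break handler relies on: 0 means the
hyperprivop (only RFI here) already redirected cr.iip, 1 means the caller
should advance iip past the break. A toy model of that contract, with
illustrative names and addresses:

#include <stdio.h>

/* Toy model of the return convention of ia64_hyperprivop() above:
 * 0 = the op (RFI) already set the guest iip, 1 = caller should
 * step past the break instruction.  Names/values are illustrative. */
enum { HP_RFI = 1, HP_GET_PSR = 2 };

static int do_hyperprivop(unsigned long iim, unsigned long *iip)
{
    switch (iim) {
    case HP_RFI:
        *iip = 0xa000000000001000UL;    /* handler redirects iip itself */
        return 0;                       /* caller must NOT advance iip */
    case HP_GET_PSR:
        return 1;                       /* caller advances past the break */
    }
    return 0;                           /* unknown op: reflect to guest */
}

int main(void)
{
    unsigned long iip = 0xa000000000000040UL;

    if (do_hyperprivop(HP_GET_PSR, &iip))
        iip += 16;                      /* next bundle (simplified) */
    printf("iip now 0x%lx\n", iip);
    return 0;
}
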
diff -r c5ddcf89f050 -r 78c494a16b95 xen/arch/ia64/xen/vcpu.c
--- a/xen/arch/ia64/xen/vcpu.c  Tue Oct 17 14:30:36 2006 -0600
+++ b/xen/arch/ia64/xen/vcpu.c  Tue Oct 17 15:43:41 2006 -0600
@@ -28,27 +28,31 @@
 #include <asm/tlb_track.h>
 
 /* FIXME: where these declarations should be there ? */
-extern void getreg(unsigned long regnum, unsigned long *val, int *nat, struct pt_regs *regs);
-extern void setreg(unsigned long regnum, unsigned long val, int nat, struct pt_regs *regs);
-extern void getfpreg (unsigned long regnum, struct ia64_fpreg *fpval, struct pt_regs *regs);
-
-extern void setfpreg (unsigned long regnum, struct ia64_fpreg *fpval, struct pt_regs *regs);
-
-typedef        union {
+extern void getreg(unsigned long regnum, unsigned long *val, int *nat,
+                   struct pt_regs *regs);
+extern void setreg(unsigned long regnum, unsigned long val, int nat,
+                   struct pt_regs *regs);
+extern void getfpreg(unsigned long regnum, struct ia64_fpreg *fpval,
+                     struct pt_regs *regs);
+
+extern void setfpreg(unsigned long regnum, struct ia64_fpreg *fpval,
+                     struct pt_regs *regs);
+
+typedef union {
        struct ia64_psr ia64_psr;
        unsigned long i64;
 } PSR;
 
 // this def for vcpu_regs won't work if kernel stack is present
-//#define      vcpu_regs(vcpu) ((struct pt_regs *) vcpu->arch.regs
-
-#define        TRUE    1
-#define        FALSE   0
+//#define       vcpu_regs(vcpu) ((struct pt_regs *) vcpu->arch.regs
+
+#define        TRUE                    1
+#define        FALSE                   0
 #define        IA64_PTA_SZ_BIT         2
 #define        IA64_PTA_VF_BIT         8
 #define        IA64_PTA_BASE_BIT       15
 #define        IA64_PTA_LFMT           (1UL << IA64_PTA_VF_BIT)
-#define        IA64_PTA_SZ(x)  (x##UL << IA64_PTA_SZ_BIT)
+#define        IA64_PTA_SZ(x)          (x##UL << IA64_PTA_SZ_BIT)
 
 unsigned long vcpu_verbose = 0;
 
@@ -56,23 +60,23 @@ unsigned long vcpu_verbose = 0;
  VCPU general register access routines
 **************************************************************************/
 #ifdef XEN
-UINT64
-vcpu_get_gr(VCPU *vcpu, unsigned long reg)
+u64 vcpu_get_gr(VCPU * vcpu, unsigned long reg)
 {
        REGS *regs = vcpu_regs(vcpu);
-       UINT64 val;
-
-       if (!reg) return 0;
-       getreg(reg,&val,0,regs);        // FIXME: handle NATs later
+       u64 val;
+
+       if (!reg)
+               return 0;
+       getreg(reg, &val, 0, regs);     // FIXME: handle NATs later
        return val;
 }
-IA64FAULT
-vcpu_get_gr_nat(VCPU *vcpu, unsigned long reg, UINT64 *val)
+
+IA64FAULT vcpu_get_gr_nat(VCPU * vcpu, unsigned long reg, u64 * val)
 {
        REGS *regs = vcpu_regs(vcpu);
        int nat;
 
-       getreg(reg,val,&nat,regs);      // FIXME: handle NATs later
+       getreg(reg, val, &nat, regs);   // FIXME: handle NATs later
        if (nat)
                return IA64_NAT_CONSUMPTION_VECTOR;
        return 0;
@@ -81,32 +85,33 @@ vcpu_get_gr_nat(VCPU *vcpu, unsigned lon
 // returns:
 //   IA64_ILLOP_FAULT if the register would cause an Illegal Operation fault
 //   IA64_NO_FAULT otherwise
-IA64FAULT
-vcpu_set_gr(VCPU *vcpu, unsigned long reg, UINT64 value, int nat)
+IA64FAULT vcpu_set_gr(VCPU * vcpu, unsigned long reg, u64 value, int nat)
 {
        REGS *regs = vcpu_regs(vcpu);
        long sof = (regs->cr_ifs) & 0x7f;
 
-       if (!reg) return IA64_ILLOP_FAULT;
-       if (reg >= sof + 32) return IA64_ILLOP_FAULT;
-       setreg(reg,value,nat,regs);     // FIXME: handle NATs later
+       if (!reg)
+               return IA64_ILLOP_FAULT;
+       if (reg >= sof + 32)
+               return IA64_ILLOP_FAULT;
+       setreg(reg, value, nat, regs);  // FIXME: handle NATs later
        return IA64_NO_FAULT;
 }
 
 IA64FAULT
-vcpu_get_fpreg(VCPU *vcpu, unsigned long reg, struct ia64_fpreg *val)
+vcpu_get_fpreg(VCPU * vcpu, unsigned long reg, struct ia64_fpreg * val)
 {
        REGS *regs = vcpu_regs(vcpu);
-       getfpreg(reg,val,regs); // FIXME: handle NATs later
+       getfpreg(reg, val, regs);       // FIXME: handle NATs later
        return IA64_NO_FAULT;
 }
 
 IA64FAULT
-vcpu_set_fpreg(VCPU *vcpu, unsigned long reg, struct ia64_fpreg *val)
+vcpu_set_fpreg(VCPU * vcpu, unsigned long reg, struct ia64_fpreg * val)
 {
        REGS *regs = vcpu_regs(vcpu);
-       if(reg > 1)
-               setfpreg(reg,val,regs); // FIXME: handle NATs later
+       if (reg > 1)
+               setfpreg(reg, val, regs);       // FIXME: handle NATs later
        return IA64_NO_FAULT;
 }
 
@@ -114,38 +119,39 @@ vcpu_set_fpreg(VCPU *vcpu, unsigned long
 // returns:
 //   IA64_ILLOP_FAULT if the register would cause an Illegal Operation fault
 //   IA64_NO_FAULT otherwise
-IA64FAULT
-vcpu_set_gr(VCPU *vcpu, unsigned long reg, UINT64 value)
+IA64FAULT vcpu_set_gr(VCPU * vcpu, unsigned long reg, u64 value)
 {
        REGS *regs = vcpu_regs(vcpu);
        long sof = (regs->cr_ifs) & 0x7f;
 
-       if (!reg) return IA64_ILLOP_FAULT;
-       if (reg >= sof + 32) return IA64_ILLOP_FAULT;
-       setreg(reg,value,0,regs);       // FIXME: handle NATs later
+       if (!reg)
+               return IA64_ILLOP_FAULT;
+       if (reg >= sof + 32)
+               return IA64_ILLOP_FAULT;
+       setreg(reg, value, 0, regs);    // FIXME: handle NATs later
        return IA64_NO_FAULT;
 }
 
 #endif
 
-void vcpu_init_regs (struct vcpu *v)
+void vcpu_init_regs(struct vcpu *v)
 {
        struct pt_regs *regs;
 
-       regs = vcpu_regs (v);
+       regs = vcpu_regs(v);
        if (VMX_DOMAIN(v)) {
                /* dt/rt/it:1;i/ic:1, si:1, vm/bn:1, ac:1 */
                /* Need to be expanded as macro */
                regs->cr_ipsr = 0x501008826008;
        } else {
                regs->cr_ipsr = ia64_getreg(_IA64_REG_PSR)
-                 | IA64_PSR_BITS_TO_SET | IA64_PSR_BN;
+                   | IA64_PSR_BITS_TO_SET | IA64_PSR_BN;
                regs->cr_ipsr &= ~(IA64_PSR_BITS_TO_CLEAR
                                   | IA64_PSR_RI | IA64_PSR_IS);
                // domain runs at PL2
                regs->cr_ipsr |= 2UL << IA64_PSR_CPL0_BIT;
        }
-       regs->cr_ifs = 1UL << 63; /* or clear? */
+       regs->cr_ifs = 1UL << 63;       /* or clear? */
        regs->ar_fpsr = FPSR_DEFAULT;
 
        if (VMX_DOMAIN(v)) {
@@ -155,13 +161,13 @@ void vcpu_init_regs (struct vcpu *v)
                VCPU(v, dcr) = 0;
        } else {
                init_all_rr(v);
-               regs->ar_rsc |= (2 << 2); /* force PL2/3 */
+               regs->ar_rsc |= (2 << 2);       /* force PL2/3 */
                VCPU(v, banknum) = 1;
                VCPU(v, metaphysical_mode) = 1;
                VCPU(v, interrupt_mask_addr) =
-                            (unsigned char *)v->domain->arch.shared_info_va +
-                            INT_ENABLE_OFFSET(v);
-               VCPU(v, itv) = (1 << 16); /* timer vector masked */
+                   (unsigned char *)v->domain->arch.shared_info_va +
+                   INT_ENABLE_OFFSET(v);
+               VCPU(v, itv) = (1 << 16);       /* timer vector masked */
        }
 
        v->arch.domain_itm_last = -1L;
@@ -171,7 +177,7 @@ void vcpu_init_regs (struct vcpu *v)
  VCPU privileged application register access routines
 **************************************************************************/
 
-void vcpu_load_kernel_regs(VCPU *vcpu)
+void vcpu_load_kernel_regs(VCPU * vcpu)
 {
        ia64_set_kr(0, VCPU(vcpu, krs[0]));
        ia64_set_kr(1, VCPU(vcpu, krs[1]));
@@ -186,26 +192,33 @@ void vcpu_load_kernel_regs(VCPU *vcpu)
 /* GCC 4.0.2 seems not to be able to suppress this call!.  */
 #define ia64_setreg_unknown_kr() return IA64_ILLOP_FAULT
 
-IA64FAULT vcpu_set_ar(VCPU *vcpu, UINT64 reg, UINT64 val)
-{
-       if (reg == 44) return (vcpu_set_itc(vcpu,val));
-       else if (reg == 27) return (IA64_ILLOP_FAULT);
+IA64FAULT vcpu_set_ar(VCPU * vcpu, u64 reg, u64 val)
+{
+       if (reg == 44)
+               return vcpu_set_itc(vcpu, val);
+       else if (reg == 27)
+               return IA64_ILLOP_FAULT;
        else if (reg == 24)
-           printf("warning: setting ar.eflg is a no-op; no IA-32 support\n");
-       else if (reg > 7) return (IA64_ILLOP_FAULT);
+               printf("warning: setting ar.eflg is a no-op; no IA-32 "
+                      "support\n");
+       else if (reg > 7)
+               return IA64_ILLOP_FAULT;
        else {
-               PSCB(vcpu,krs[reg]) = val;
-               ia64_set_kr(reg,val);
-       }
-       return IA64_NO_FAULT;
-}
-
-IA64FAULT vcpu_get_ar(VCPU *vcpu, UINT64 reg, UINT64 *val)
+               PSCB(vcpu, krs[reg]) = val;
+               ia64_set_kr(reg, val);
+       }
+       return IA64_NO_FAULT;
+}
+
+IA64FAULT vcpu_get_ar(VCPU * vcpu, u64 reg, u64 * val)
 {
        if (reg == 24)
-           printf("warning: getting ar.eflg is a no-op; no IA-32 support\n");
-       else if (reg > 7) return (IA64_ILLOP_FAULT);
-       else *val = PSCB(vcpu,krs[reg]);
+               printf("warning: getting ar.eflg is a no-op; no IA-32 "
+                      "support\n");
+       else if (reg > 7)
+               return IA64_ILLOP_FAULT;
+       else
+               *val = PSCB(vcpu, krs[reg]);
        return IA64_NO_FAULT;
 }
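
The AR numbering these two routines depend on, collected in one
self-contained lookup; only the krs[] backing of AR 0-7 comes from the code
above, and the ar.cflg name for AR 27 is an assumption for illustration:

#include <stdio.h>

/* The AR-number cases handled by vcpu_set_ar()/vcpu_get_ar() above,
 * as one lookup.  The ar.itc entry applies to the set path only. */
static const char *ar_class(unsigned int reg)
{
    if (reg <= 7)
        return "kernel register, backed by PSCB krs[reg]";
    if (reg == 24)
        return "ar.eflag: no-op, no IA-32 support";
    if (reg == 27)
        return "ar.cflg (assumed name): IA64_ILLOP_FAULT";
    if (reg == 44)
        return "ar.itc: set path redirected to vcpu_set_itc()";
    return "IA64_ILLOP_FAULT";
}

int main(void)
{
    unsigned int regs[] = { 0, 7, 24, 27, 44, 45 };
    unsigned int i;

    for (i = 0; i < sizeof(regs) / sizeof(regs[0]); i++)
        printf("ar%u: %s\n", regs[i], ar_class(regs[i]));
    return 0;
}
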
 
@@ -213,24 +226,25 @@ IA64FAULT vcpu_get_ar(VCPU *vcpu, UINT64
  VCPU processor status register access routines
 **************************************************************************/
 
-void vcpu_set_metaphysical_mode(VCPU *vcpu, BOOLEAN newmode)
+void vcpu_set_metaphysical_mode(VCPU * vcpu, BOOLEAN newmode)
 {
        /* only do something if mode changes */
-       if (!!newmode ^ !!PSCB(vcpu,metaphysical_mode)) {
-               PSCB(vcpu,metaphysical_mode) = newmode;
-               if (newmode) set_metaphysical_rr0();
-               else if (PSCB(vcpu,rrs[0]) != -1)
-                       set_one_rr(0, PSCB(vcpu,rrs[0]));
-       }
-}
-
-IA64FAULT vcpu_reset_psr_dt(VCPU *vcpu)
-{
-       vcpu_set_metaphysical_mode(vcpu,TRUE);
-       return IA64_NO_FAULT;
-}
-
-IA64FAULT vcpu_reset_psr_sm(VCPU *vcpu, UINT64 imm24)
+       if (!!newmode ^ !!PSCB(vcpu, metaphysical_mode)) {
+               PSCB(vcpu, metaphysical_mode) = newmode;
+               if (newmode)
+                       set_metaphysical_rr0();
+               else if (PSCB(vcpu, rrs[0]) != -1)
+                       set_one_rr(0, PSCB(vcpu, rrs[0]));
+       }
+}
+
+IA64FAULT vcpu_reset_psr_dt(VCPU * vcpu)
+{
+       vcpu_set_metaphysical_mode(vcpu, TRUE);
+       return IA64_NO_FAULT;
+}
+
+IA64FAULT vcpu_reset_psr_sm(VCPU * vcpu, u64 imm24)
 {
        struct ia64_psr psr, imm, *ipsr;
        REGS *regs = vcpu_regs(vcpu);
@@ -238,72 +252,89 @@ IA64FAULT vcpu_reset_psr_sm(VCPU *vcpu, 
        //PRIVOP_COUNT_ADDR(regs,_RSM);
        // TODO: All of these bits need to be virtualized
        // TODO: Only allowed for current vcpu
-       __asm__ __volatile ("mov %0=psr;;" : "=r"(psr) :: "memory");
+       __asm__ __volatile("mov %0=psr;;":"=r"(psr)::"memory");
        ipsr = (struct ia64_psr *)&regs->cr_ipsr;
        imm = *(struct ia64_psr *)&imm24;
        // interrupt flag
        if (imm.i)
-           vcpu->vcpu_info->evtchn_upcall_mask = 1;
-       if (imm.ic)  PSCB(vcpu,interrupt_collection_enabled) = 0;
+               vcpu->vcpu_info->evtchn_upcall_mask = 1;
+       if (imm.ic)
+               PSCB(vcpu, interrupt_collection_enabled) = 0;
        // interrupt collection flag
        //if (imm.ic) PSCB(vcpu,interrupt_delivery_enabled) = 0;
        // just handle psr.up and psr.pp for now
-       if (imm24 & ~(IA64_PSR_BE | IA64_PSR_PP | IA64_PSR_UP | IA64_PSR_SP
-               | IA64_PSR_I | IA64_PSR_IC | IA64_PSR_DT
-               | IA64_PSR_DFL | IA64_PSR_DFH))
-                       return (IA64_ILLOP_FAULT);
-       if (imm.dfh) ipsr->dfh = 0;
-       if (imm.dfl) ipsr->dfl = 0;
+       if (imm24 & ~(IA64_PSR_BE | IA64_PSR_PP | IA64_PSR_UP | IA64_PSR_SP |
+                     IA64_PSR_I | IA64_PSR_IC | IA64_PSR_DT |
+                     IA64_PSR_DFL | IA64_PSR_DFH))
+               return IA64_ILLOP_FAULT;
+       if (imm.dfh)
+               ipsr->dfh = 0;
+       if (imm.dfl)
+               ipsr->dfl = 0;
        if (imm.pp) {
                ipsr->pp = 1;
                psr.pp = 1;     // priv perf ctrs always enabled
-               PSCB(vcpu,vpsr_pp) = 0; // but fool the domain if it gets psr
-       }
-       if (imm.up) { ipsr->up = 0; psr.up = 0; }
-       if (imm.sp) { ipsr->sp = 0; psr.sp = 0; }
-       if (imm.be) ipsr->be = 0;
-       if (imm.dt) vcpu_set_metaphysical_mode(vcpu,TRUE);
-       __asm__ __volatile (";; mov psr.l=%0;; srlz.d"::"r"(psr):"memory");
-       return IA64_NO_FAULT;
-}
-
-
-IA64FAULT vcpu_set_psr_dt(VCPU *vcpu)
-{
-       vcpu_set_metaphysical_mode(vcpu,FALSE);
-       return IA64_NO_FAULT;
-}
-
-IA64FAULT vcpu_set_psr_i(VCPU *vcpu)
+               PSCB(vcpu, vpsr_pp) = 0; // but fool the domain if it gets psr
+       }
+       if (imm.up) {
+               ipsr->up = 0;
+               psr.up = 0;
+       }
+       if (imm.sp) {
+               ipsr->sp = 0;
+               psr.sp = 0;
+       }
+       if (imm.be)
+               ipsr->be = 0;
+       if (imm.dt)
+               vcpu_set_metaphysical_mode(vcpu, TRUE);
+       __asm__ __volatile(";; mov psr.l=%0;; srlz.d"::"r"(psr):"memory");
+       return IA64_NO_FAULT;
+}
+
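
The rsm path above and the ssm path below share the same first step: reject
any immediate that names a PSR bit outside the whitelist the hypervisor can
virtualize. A minimal sketch with placeholder bit positions (the real
IA64_PSR_* values live in the asm headers):

#include <stdio.h>
#include <stdint.h>

/* Sketch of the whitelist test shared by the rsm/ssm emulation:
 * the immediate may only name PSR bits the hypervisor virtualizes.
 * Bit positions below are placeholders, not the asm-header values. */
#define PSR_I   (1UL << 14)
#define PSR_IC  (1UL << 13)
#define PSR_DT  (1UL << 17)

static int psr_sm_allowed(uint64_t imm24)
{
    const uint64_t mask = PSR_I | PSR_IC | PSR_DT;  /* demo subset */

    return (imm24 & ~mask) == 0;    /* 0 -> IA64_ILLOP_FAULT */
}

int main(void)
{
    printf("ssm psr.i : %s\n", psr_sm_allowed(PSR_I) ? "ok" : "ILLOP");
    printf("ssm other : %s\n", psr_sm_allowed(1UL << 1) ? "ok" : "ILLOP");
    return 0;
}
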
+IA64FAULT vcpu_set_psr_dt(VCPU * vcpu)
+{
+       vcpu_set_metaphysical_mode(vcpu, FALSE);
+       return IA64_NO_FAULT;
+}
+
+IA64FAULT vcpu_set_psr_i(VCPU * vcpu)
 {
        vcpu->vcpu_info->evtchn_upcall_mask = 0;
-       PSCB(vcpu,interrupt_collection_enabled) = 1;
-       return IA64_NO_FAULT;
-}
-
-IA64FAULT vcpu_set_psr_sm(VCPU *vcpu, UINT64 imm24)
+       PSCB(vcpu, interrupt_collection_enabled) = 1;
+       return IA64_NO_FAULT;
+}
+
+IA64FAULT vcpu_set_psr_sm(VCPU * vcpu, u64 imm24)
 {
        struct ia64_psr psr, imm, *ipsr;
        REGS *regs = vcpu_regs(vcpu);
-       UINT64 mask, enabling_interrupts = 0;
+       u64 mask, enabling_interrupts = 0;
 
        //PRIVOP_COUNT_ADDR(regs,_SSM);
        // TODO: All of these bits need to be virtualized
-       __asm__ __volatile ("mov %0=psr;;" : "=r"(psr) :: "memory");
+       __asm__ __volatile("mov %0=psr;;":"=r"(psr)::"memory");
        imm = *(struct ia64_psr *)&imm24;
        ipsr = (struct ia64_psr *)&regs->cr_ipsr;
        // just handle psr.sp,pp and psr.i,ic (and user mask) for now
-       mask = IA64_PSR_PP|IA64_PSR_SP|IA64_PSR_I|IA64_PSR_IC|IA64_PSR_UM |
-               IA64_PSR_DT|IA64_PSR_DFL|IA64_PSR_DFH;
-       if (imm24 & ~mask) return (IA64_ILLOP_FAULT);
-       if (imm.dfh) ipsr->dfh = 1;
-       if (imm.dfl) ipsr->dfl = 1;
+       mask =
+           IA64_PSR_PP | IA64_PSR_SP | IA64_PSR_I | IA64_PSR_IC | IA64_PSR_UM |
+           IA64_PSR_DT | IA64_PSR_DFL | IA64_PSR_DFH;
+       if (imm24 & ~mask)
+               return IA64_ILLOP_FAULT;
+       if (imm.dfh)
+               ipsr->dfh = 1;
+       if (imm.dfl)
+               ipsr->dfl = 1;
        if (imm.pp) {
                ipsr->pp = 1;
                psr.pp = 1;
-               PSCB(vcpu,vpsr_pp) = 1;
-       }
-       if (imm.sp) { ipsr->sp = 1; psr.sp = 1; }
+               PSCB(vcpu, vpsr_pp) = 1;
+       }
+       if (imm.sp) {
+               ipsr->sp = 1;
+               psr.sp = 1;
+       }
        if (imm.i) {
                if (vcpu->vcpu_info->evtchn_upcall_mask) {
 //printf("vcpu_set_psr_sm: psr.ic 0->1\n");
@@ -311,114 +342,169 @@ IA64FAULT vcpu_set_psr_sm(VCPU *vcpu, UI
                }
                vcpu->vcpu_info->evtchn_upcall_mask = 0;
        }
-       if (imm.ic)  PSCB(vcpu,interrupt_collection_enabled) = 1;
+       if (imm.ic)
+               PSCB(vcpu, interrupt_collection_enabled) = 1;
        // TODO: do this faster
-       if (imm.mfl) { ipsr->mfl = 1; psr.mfl = 1; }
-       if (imm.mfh) { ipsr->mfh = 1; psr.mfh = 1; }
-       if (imm.ac) { ipsr->ac = 1; psr.ac = 1; }
-       if (imm.up) { ipsr->up = 1; psr.up = 1; }
+       if (imm.mfl) {
+               ipsr->mfl = 1;
+               psr.mfl = 1;
+       }
+       if (imm.mfh) {
+               ipsr->mfh = 1;
+               psr.mfh = 1;
+       }
+       if (imm.ac) {
+               ipsr->ac = 1;
+               psr.ac = 1;
+       }
+       if (imm.up) {
+               ipsr->up = 1;
+               psr.up = 1;
+       }
        if (imm.be) {
                printf("*** DOMAIN TRYING TO TURN ON BIG-ENDIAN!!!\n");
-               return (IA64_ILLOP_FAULT);
-       }
-       if (imm.dt) vcpu_set_metaphysical_mode(vcpu,FALSE);
-       __asm__ __volatile (";; mov psr.l=%0;; srlz.d"::"r"(psr):"memory");
+               return IA64_ILLOP_FAULT;
+       }
+       if (imm.dt)
+               vcpu_set_metaphysical_mode(vcpu, FALSE);
+       __asm__ __volatile(";; mov psr.l=%0;; srlz.d"::"r"(psr):"memory");
        if (enabling_interrupts &&
-               vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR)
-                       PSCB(vcpu,pending_interruption) = 1;
-       return IA64_NO_FAULT;
-}
-
-IA64FAULT vcpu_set_psr_l(VCPU *vcpu, UINT64 val)
+           vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR)
+               PSCB(vcpu, pending_interruption) = 1;
+       return IA64_NO_FAULT;
+}
+
+IA64FAULT vcpu_set_psr_l(VCPU * vcpu, u64 val)
 {
        struct ia64_psr psr, newpsr, *ipsr;
        REGS *regs = vcpu_regs(vcpu);
-       UINT64 enabling_interrupts = 0;
+       u64 enabling_interrupts = 0;
 
        // TODO: All of these bits need to be virtualized
-       __asm__ __volatile ("mov %0=psr;;" : "=r"(psr) :: "memory");
+       __asm__ __volatile("mov %0=psr;;":"=r"(psr)::"memory");
        newpsr = *(struct ia64_psr *)&val;
        ipsr = (struct ia64_psr *)&regs->cr_ipsr;
        // just handle psr.up and psr.pp for now
-       //if (val & ~(IA64_PSR_PP | IA64_PSR_UP | IA64_PSR_SP)) return (IA64_ILLOP_FAULT);
+       //if (val & ~(IA64_PSR_PP | IA64_PSR_UP | IA64_PSR_SP))
+       //      return IA64_ILLOP_FAULT;
        // however trying to set other bits can't be an error as it is in ssm
-       if (newpsr.dfh) ipsr->dfh = 1;
-       if (newpsr.dfl) ipsr->dfl = 1;
+       if (newpsr.dfh)
+               ipsr->dfh = 1;
+       if (newpsr.dfl)
+               ipsr->dfl = 1;
        if (newpsr.pp) {
-               ipsr->pp = 1; psr.pp = 1;
-               PSCB(vcpu,vpsr_pp) = 1;
-       }
-       else {
-               ipsr->pp = 1; psr.pp = 1;
-               PSCB(vcpu,vpsr_pp) = 0;
-       }
-       if (newpsr.up) { ipsr->up = 1; psr.up = 1; }
-       if (newpsr.sp) { ipsr->sp = 1; psr.sp = 1; }
+               ipsr->pp = 1;
+               psr.pp = 1;
+               PSCB(vcpu, vpsr_pp) = 1;
+       } else {
+               ipsr->pp = 1;
+               psr.pp = 1;
+               PSCB(vcpu, vpsr_pp) = 0;
+       }
+       if (newpsr.up) {
+               ipsr->up = 1;
+               psr.up = 1;
+       }
+       if (newpsr.sp) {
+               ipsr->sp = 1;
+               psr.sp = 1;
+       }
        if (newpsr.i) {
                if (vcpu->vcpu_info->evtchn_upcall_mask)
                        enabling_interrupts = 1;
                vcpu->vcpu_info->evtchn_upcall_mask = 0;
        }
-       if (newpsr.ic)  PSCB(vcpu,interrupt_collection_enabled) = 1;
-       if (newpsr.mfl) { ipsr->mfl = 1; psr.mfl = 1; }
-       if (newpsr.mfh) { ipsr->mfh = 1; psr.mfh = 1; }
-       if (newpsr.ac) { ipsr->ac = 1; psr.ac = 1; }
-       if (newpsr.up) { ipsr->up = 1; psr.up = 1; }
-       if (newpsr.dt && newpsr.rt) vcpu_set_metaphysical_mode(vcpu,FALSE);
-       else vcpu_set_metaphysical_mode(vcpu,TRUE);
+       if (newpsr.ic)
+               PSCB(vcpu, interrupt_collection_enabled) = 1;
+       if (newpsr.mfl) {
+               ipsr->mfl = 1;
+               psr.mfl = 1;
+       }
+       if (newpsr.mfh) {
+               ipsr->mfh = 1;
+               psr.mfh = 1;
+       }
+       if (newpsr.ac) {
+               ipsr->ac = 1;
+               psr.ac = 1;
+       }
+       if (newpsr.up) {
+               ipsr->up = 1;
+               psr.up = 1;
+       }
+       if (newpsr.dt && newpsr.rt)
+               vcpu_set_metaphysical_mode(vcpu, FALSE);
+       else
+               vcpu_set_metaphysical_mode(vcpu, TRUE);
        if (newpsr.be) {
                printf("*** DOMAIN TRYING TO TURN ON BIG-ENDIAN!!!\n");
-               return (IA64_ILLOP_FAULT);
+               return IA64_ILLOP_FAULT;
        }
        if (enabling_interrupts &&
-               vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR)
-                       PSCB(vcpu,pending_interruption) = 1;
-       return IA64_NO_FAULT;
-}
-
-IA64FAULT vcpu_get_psr(VCPU *vcpu, UINT64 *pval)
+           vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR)
+               PSCB(vcpu, pending_interruption) = 1;
+       return IA64_NO_FAULT;
+}
+
+IA64FAULT vcpu_get_psr(VCPU * vcpu, u64 * pval)
 {
        REGS *regs = vcpu_regs(vcpu);
        struct ia64_psr newpsr;
 
        newpsr = *(struct ia64_psr *)&regs->cr_ipsr;
-       if (newpsr.cpl == 2) newpsr.cpl = 0;
-       if (!vcpu->vcpu_info->evtchn_upcall_mask) newpsr.i = 1;
-       else newpsr.i = 0;
-       if (PSCB(vcpu,interrupt_collection_enabled)) newpsr.ic = 1;
-       else newpsr.ic = 0;
-       if (PSCB(vcpu,metaphysical_mode)) newpsr.dt = 0;
-       else newpsr.dt = 1;
-       if (PSCB(vcpu,vpsr_pp)) newpsr.pp = 1;
-       else newpsr.pp = 0;
+       if (newpsr.cpl == 2)
+               newpsr.cpl = 0;
+       if (!vcpu->vcpu_info->evtchn_upcall_mask)
+               newpsr.i = 1;
+       else
+               newpsr.i = 0;
+       if (PSCB(vcpu, interrupt_collection_enabled))
+               newpsr.ic = 1;
+       else
+               newpsr.ic = 0;
+       if (PSCB(vcpu, metaphysical_mode))
+               newpsr.dt = 0;
+       else
+               newpsr.dt = 1;
+       if (PSCB(vcpu, vpsr_pp))
+               newpsr.pp = 1;
+       else
+               newpsr.pp = 0;
        *pval = *(unsigned long *)&newpsr;
        return IA64_NO_FAULT;
 }
 
-BOOLEAN vcpu_get_psr_ic(VCPU *vcpu)
-{
-       return !!PSCB(vcpu,interrupt_collection_enabled);
-}
-
-BOOLEAN vcpu_get_psr_i(VCPU *vcpu)
+BOOLEAN vcpu_get_psr_ic(VCPU * vcpu)
+{
+       return !!PSCB(vcpu, interrupt_collection_enabled);
+}
+
+BOOLEAN vcpu_get_psr_i(VCPU * vcpu)
 {
        return !vcpu->vcpu_info->evtchn_upcall_mask;
 }
 
-UINT64 vcpu_get_ipsr_int_state(VCPU *vcpu,UINT64 prevpsr)
-{
-       UINT64 dcr = PSCBX(vcpu,dcr);
+u64 vcpu_get_ipsr_int_state(VCPU * vcpu, u64 prevpsr)
+{
+       u64 dcr = PSCBX(vcpu, dcr);
        PSR psr;
 
        //printf("*** vcpu_get_ipsr_int_state (0x%016lx)...\n",prevpsr);
        psr.i64 = prevpsr;
-       psr.ia64_psr.be = 0; if (dcr & IA64_DCR_BE) psr.ia64_psr.be = 1;
-       psr.ia64_psr.pp = 0; if (dcr & IA64_DCR_PP) psr.ia64_psr.pp = 1;
-       psr.ia64_psr.ic = PSCB(vcpu,interrupt_collection_enabled);
+       psr.ia64_psr.be = 0;
+       if (dcr & IA64_DCR_BE)
+               psr.ia64_psr.be = 1;
+       psr.ia64_psr.pp = 0;
+       if (dcr & IA64_DCR_PP)
+               psr.ia64_psr.pp = 1;
+       psr.ia64_psr.ic = PSCB(vcpu, interrupt_collection_enabled);
        psr.ia64_psr.i = !vcpu->vcpu_info->evtchn_upcall_mask;
-       psr.ia64_psr.bn = PSCB(vcpu,banknum);
-       psr.ia64_psr.dt = 1; psr.ia64_psr.it = 1; psr.ia64_psr.rt = 1;
-       if (psr.ia64_psr.cpl == 2) psr.ia64_psr.cpl = 0; // !!!! fool domain
+       psr.ia64_psr.bn = PSCB(vcpu, banknum);
+       psr.ia64_psr.dt = 1;
+       psr.ia64_psr.it = 1;
+       psr.ia64_psr.rt = 1;
+       if (psr.ia64_psr.cpl == 2)
+               psr.ia64_psr.cpl = 0;   // !!!! fool domain
        // psr.pk = 1;
        //printf("returns 0x%016lx...\n",psr.i64);
        return psr.i64;
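
That routine leans on the PSR union declared near the top of the file: a
struct ia64_psr bitfield overlaid on the raw 64-bit word, so single bits can
be edited and the whole word handed back. A toy equivalent, with an
illustrative (not architected) field layout:

#include <stdio.h>
#include <stdint.h>

/* Toy version of the PSR union in vcpu.c: a bitfield view overlaid on
 * the raw 64-bit word.  Field layout here is illustrative only. */
union psr {
    struct {
        uint64_t be:1, pp:1, ic:1, i:1, bn:1, rest:59;
    } f;
    uint64_t i64;
};

int main(void)
{
    union psr psr = { .i64 = 0 };

    psr.f.ic = 1;   /* interrupt collection on */
    psr.f.bn = 1;   /* register bank 1 */
    printf("psr word = 0x%lx\n", (unsigned long)psr.i64);
    return 0;
}
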
@@ -428,223 +514,227 @@ UINT64 vcpu_get_ipsr_int_state(VCPU *vcp
  VCPU control register access routines
 **************************************************************************/
 
-IA64FAULT vcpu_get_dcr(VCPU *vcpu, UINT64 *pval)
+IA64FAULT vcpu_get_dcr(VCPU * vcpu, u64 * pval)
 {
 //verbose("vcpu_get_dcr: called @%p\n",PSCB(vcpu,iip));
        // Reads of cr.dcr on Xen always have the sign bit set, so
        // a domain can differentiate whether it is running on SP or not
-       *pval = PSCBX(vcpu,dcr) | 0x8000000000000000L;
-       return (IA64_NO_FAULT);
-}
-
-IA64FAULT vcpu_get_iva(VCPU *vcpu, UINT64 *pval)
-{
-    if(VMX_DOMAIN(vcpu)){
-       *pval = PSCB(vcpu,iva) & ~0x7fffL;
-    }else{
-        *pval = PSCBX(vcpu,iva) & ~0x7fffL;
-    }
-       return (IA64_NO_FAULT);
-}
-
-IA64FAULT vcpu_get_pta(VCPU *vcpu, UINT64 *pval)
-{
-       *pval = PSCB(vcpu,pta);
-       return (IA64_NO_FAULT);
-}
-
-IA64FAULT vcpu_get_ipsr(VCPU *vcpu, UINT64 *pval)
+       *pval = PSCBX(vcpu, dcr) | 0x8000000000000000L;
+       return IA64_NO_FAULT;
+}
+
+IA64FAULT vcpu_get_iva(VCPU * vcpu, u64 * pval)
+{
+       if (VMX_DOMAIN(vcpu))
+               *pval = PSCB(vcpu, iva) & ~0x7fffL;
+       else
+               *pval = PSCBX(vcpu, iva) & ~0x7fffL;
+
+       return IA64_NO_FAULT;
+}
+
+IA64FAULT vcpu_get_pta(VCPU * vcpu, u64 * pval)
+{
+       *pval = PSCB(vcpu, pta);
+       return IA64_NO_FAULT;
+}
+
+IA64FAULT vcpu_get_ipsr(VCPU * vcpu, u64 * pval)
 {
        //REGS *regs = vcpu_regs(vcpu);
        //*pval = regs->cr_ipsr;
-       *pval = PSCB(vcpu,ipsr);
-       return (IA64_NO_FAULT);
-}
-
-IA64FAULT vcpu_get_isr(VCPU *vcpu, UINT64 *pval)
-{
-       *pval = PSCB(vcpu,isr);
-       return (IA64_NO_FAULT);
-}
-
-IA64FAULT vcpu_get_iip(VCPU *vcpu, UINT64 *pval)
+       *pval = PSCB(vcpu, ipsr);
+       return IA64_NO_FAULT;
+}
+
+IA64FAULT vcpu_get_isr(VCPU * vcpu, u64 * pval)
+{
+       *pval = PSCB(vcpu, isr);
+       return IA64_NO_FAULT;
+}
+
+IA64FAULT vcpu_get_iip(VCPU * vcpu, u64 * pval)
 {
        //REGS *regs = vcpu_regs(vcpu);
        //*pval = regs->cr_iip;
-       *pval = PSCB(vcpu,iip);
-       return (IA64_NO_FAULT);
-}
-
-IA64FAULT vcpu_get_ifa(VCPU *vcpu, UINT64 *pval)
+       *pval = PSCB(vcpu, iip);
+       return IA64_NO_FAULT;
+}
+
+IA64FAULT vcpu_get_ifa(VCPU * vcpu, u64 * pval)
 {
        PRIVOP_COUNT_ADDR(vcpu_regs(vcpu), privop_inst_get_ifa);
-       *pval = PSCB(vcpu,ifa);
-       return (IA64_NO_FAULT);
-}
-
-unsigned long vcpu_get_rr_ps(VCPU *vcpu,UINT64 vadr)
+       *pval = PSCB(vcpu, ifa);
+       return IA64_NO_FAULT;
+}
+
+unsigned long vcpu_get_rr_ps(VCPU * vcpu, u64 vadr)
 {
        ia64_rr rr;
 
-       rr.rrval = PSCB(vcpu,rrs)[vadr>>61];
-       return(rr.ps);
-}
-
-unsigned long vcpu_get_rr_rid(VCPU *vcpu,UINT64 vadr)
+       rr.rrval = PSCB(vcpu, rrs)[vadr >> 61];
+       return rr.ps;
+}
+
+unsigned long vcpu_get_rr_rid(VCPU * vcpu, u64 vadr)
 {
        ia64_rr rr;
 
-       rr.rrval = PSCB(vcpu,rrs)[vadr>>61];
-       return(rr.rid);
-}
-
-unsigned long vcpu_get_itir_on_fault(VCPU *vcpu, UINT64 ifa)
+       rr.rrval = PSCB(vcpu, rrs)[vadr >> 61];
+       return rr.rid;
+}
+
+unsigned long vcpu_get_itir_on_fault(VCPU * vcpu, u64 ifa)
 {
        ia64_rr rr;
 
        rr.rrval = 0;
-       rr.ps = vcpu_get_rr_ps(vcpu,ifa);
-       rr.rid = vcpu_get_rr_rid(vcpu,ifa);
-       return (rr.rrval);
-}
-
-
-IA64FAULT vcpu_get_itir(VCPU *vcpu, UINT64 *pval)
-{
-       UINT64 val = PSCB(vcpu,itir);
+       rr.ps = vcpu_get_rr_ps(vcpu, ifa);
+       rr.rid = vcpu_get_rr_rid(vcpu, ifa);
+       return rr.rrval;
+}
+
+IA64FAULT vcpu_get_itir(VCPU * vcpu, u64 * pval)
+{
+       u64 val = PSCB(vcpu, itir);
        *pval = val;
-       return (IA64_NO_FAULT);
-}
-
-IA64FAULT vcpu_get_iipa(VCPU *vcpu, UINT64 *pval)
-{
-       UINT64 val = PSCB(vcpu,iipa);
+       return IA64_NO_FAULT;
+}
+
+IA64FAULT vcpu_get_iipa(VCPU * vcpu, u64 * pval)
+{
+       u64 val = PSCB(vcpu, iipa);
        // SP entry code does not save iipa yet nor does it get
        //  properly delivered in the pscb
 //     printf("*** vcpu_get_iipa: cr.iipa not fully implemented yet!!\n");
        *pval = val;
-       return (IA64_NO_FAULT);
-}
-
-IA64FAULT vcpu_get_ifs(VCPU *vcpu, UINT64 *pval)
+       return IA64_NO_FAULT;
+}
+
+IA64FAULT vcpu_get_ifs(VCPU * vcpu, u64 * pval)
 {
        //PSCB(vcpu,ifs) = PSCB(vcpu)->regs.cr_ifs;
        //*pval = PSCB(vcpu,regs).cr_ifs;
-       *pval = PSCB(vcpu,ifs);
-       PSCB(vcpu,incomplete_regframe) = 0;
-       return (IA64_NO_FAULT);
-}
-
-IA64FAULT vcpu_get_iim(VCPU *vcpu, UINT64 *pval)
-{
-       UINT64 val = PSCB(vcpu,iim);
+       *pval = PSCB(vcpu, ifs);
+       PSCB(vcpu, incomplete_regframe) = 0;
+       return IA64_NO_FAULT;
+}
+
+IA64FAULT vcpu_get_iim(VCPU * vcpu, u64 * pval)
+{
+       u64 val = PSCB(vcpu, iim);
        *pval = val;
-       return (IA64_NO_FAULT);
-}
-
-IA64FAULT vcpu_get_iha(VCPU *vcpu, UINT64 *pval)
+       return IA64_NO_FAULT;
+}
+
+IA64FAULT vcpu_get_iha(VCPU * vcpu, u64 * pval)
 {
        PRIVOP_COUNT_ADDR(vcpu_regs(vcpu), privop_inst_thash);
-       *pval = PSCB(vcpu,iha);
-       return (IA64_NO_FAULT);
-}
-
-IA64FAULT vcpu_set_dcr(VCPU *vcpu, UINT64 val)
+       *pval = PSCB(vcpu, iha);
+       return IA64_NO_FAULT;
+}
+
+IA64FAULT vcpu_set_dcr(VCPU * vcpu, u64 val)
 {
        // Reads of cr.dcr on SP always have the sign bit set, so
        // a domain can differentiate whether it is running on SP or not
        // Thus, writes of DCR should ignore the sign bit
 //verbose("vcpu_set_dcr: called\n");
-       PSCBX(vcpu,dcr) = val & ~0x8000000000000000L;
-       return (IA64_NO_FAULT);
-}
-
-IA64FAULT vcpu_set_iva(VCPU *vcpu, UINT64 val)
-{
-    if(VMX_DOMAIN(vcpu)){
-       PSCB(vcpu,iva) = val & ~0x7fffL;
-    }else{
-        PSCBX(vcpu,iva) = val & ~0x7fffL;
-    }
-       return (IA64_NO_FAULT);
-}
-
-IA64FAULT vcpu_set_pta(VCPU *vcpu, UINT64 val)
+       PSCBX(vcpu, dcr) = val & ~0x8000000000000000L;
+       return IA64_NO_FAULT;
+}
+
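
The cr.dcr pair above encodes a small contract: reads always present bit 63
set so a paravirtualized domain can detect it is running under Xen, and
writes strip that bit before storing. Modelled standalone, with shadow_dcr
standing in for PSCBX(vcpu, dcr):

#include <stdio.h>
#include <stdint.h>

/* Standalone model of the vcpu_get_dcr()/vcpu_set_dcr() sign-bit
 * convention above; shadow_dcr stands in for PSCBX(vcpu, dcr). */
static uint64_t shadow_dcr;

static uint64_t get_dcr(void)
{
    return shadow_dcr | 0x8000000000000000UL;   /* reads: bit 63 set */
}

static void set_dcr(uint64_t val)
{
    shadow_dcr = val & ~0x8000000000000000UL;   /* writes: bit 63 stripped */
}

int main(void)
{
    set_dcr(get_dcr());     /* round trip: the sign bit never sticks */
    printf("shadow=0x%lx read=0x%lx\n",
           (unsigned long)shadow_dcr, (unsigned long)get_dcr());
    return 0;
}
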
+IA64FAULT vcpu_set_iva(VCPU * vcpu, u64 val)
+{
+       if (VMX_DOMAIN(vcpu))
+               PSCB(vcpu, iva) = val & ~0x7fffL;
+       else
+               PSCBX(vcpu, iva) = val & ~0x7fffL;
+
+       return IA64_NO_FAULT;
+}
+
+IA64FAULT vcpu_set_pta(VCPU * vcpu, u64 val)
 {
        if (val & IA64_PTA_LFMT) {
                printf("*** No support for VHPT long format yet!!\n");
-               return (IA64_ILLOP_FAULT);
-       }
-       if (val & (0x3f<<9)) /* reserved fields */ return IA64_RSVDREG_FAULT;
-       if (val & 2) /* reserved fields */ return IA64_RSVDREG_FAULT;
-       PSCB(vcpu,pta) = val;
-       return IA64_NO_FAULT;
-}
-
-IA64FAULT vcpu_set_ipsr(VCPU *vcpu, UINT64 val)
-{
-       PSCB(vcpu,ipsr) = val;
-       return IA64_NO_FAULT;
-}
-
-IA64FAULT vcpu_set_isr(VCPU *vcpu, UINT64 val)
-{
-       PSCB(vcpu,isr) = val;
-       return IA64_NO_FAULT;
-}
-
-IA64FAULT vcpu_set_iip(VCPU *vcpu, UINT64 val)
-{
-       PSCB(vcpu,iip) = val;
-       return IA64_NO_FAULT;
-}
-
-IA64FAULT vcpu_increment_iip(VCPU *vcpu)
+               return IA64_ILLOP_FAULT;
+       }
+       if (val & (0x3f << 9))  /* reserved fields */
+               return IA64_RSVDREG_FAULT;
+       if (val & 2)            /* reserved fields */
+               return IA64_RSVDREG_FAULT;
+       PSCB(vcpu, pta) = val;
+       return IA64_NO_FAULT;
+}
+
+IA64FAULT vcpu_set_ipsr(VCPU * vcpu, u64 val)
+{
+       PSCB(vcpu, ipsr) = val;
+       return IA64_NO_FAULT;
+}
+
+IA64FAULT vcpu_set_isr(VCPU * vcpu, u64 val)
+{
+       PSCB(vcpu, isr) = val;
+       return IA64_NO_FAULT;
+}
+
+IA64FAULT vcpu_set_iip(VCPU * vcpu, u64 val)
+{
+       PSCB(vcpu, iip) = val;
+       return IA64_NO_FAULT;
+}
+
+IA64FAULT vcpu_increment_iip(VCPU * vcpu)
 {
        REGS *regs = vcpu_regs(vcpu);
        struct ia64_psr *ipsr = (struct ia64_psr *)&regs->cr_ipsr;
-       if (ipsr->ri == 2) { ipsr->ri=0; regs->cr_iip += 16; }
-       else ipsr->ri++;
-       return (IA64_NO_FAULT);
-}
-
-IA64FAULT vcpu_set_ifa(VCPU *vcpu, UINT64 val)
-{
-       PSCB(vcpu,ifa) = val;
-       return IA64_NO_FAULT;
-}
-
-IA64FAULT vcpu_set_itir(VCPU *vcpu, UINT64 val)
-{
-       PSCB(vcpu,itir) = val;
-       return IA64_NO_FAULT;
-}
-
-IA64FAULT vcpu_set_iipa(VCPU *vcpu, UINT64 val)
+       if (ipsr->ri == 2) {
+               ipsr->ri = 0;
+               regs->cr_iip += 16;
+       } else
+               ipsr->ri++;
+       return IA64_NO_FAULT;
+}
+
+IA64FAULT vcpu_set_ifa(VCPU * vcpu, u64 val)
+{
+       PSCB(vcpu, ifa) = val;
+       return IA64_NO_FAULT;
+}
+
+IA64FAULT vcpu_set_itir(VCPU * vcpu, u64 val)
+{
+       PSCB(vcpu, itir) = val;
+       return IA64_NO_FAULT;
+}
+
+IA64FAULT vcpu_set_iipa(VCPU * vcpu, u64 val)
 {
        // SP entry code does not save iipa yet nor does it get
        //  properly delivered in the pscb
 //     printf("*** vcpu_set_iipa: cr.iipa not fully implemented yet!!\n");
-       PSCB(vcpu,iipa) = val;
-       return IA64_NO_FAULT;
-}
-
-IA64FAULT vcpu_set_ifs(VCPU *vcpu, UINT64 val)
+       PSCB(vcpu, iipa) = val;
+       return IA64_NO_FAULT;
+}
+
+IA64FAULT vcpu_set_ifs(VCPU * vcpu, u64 val)
 {
        //REGS *regs = vcpu_regs(vcpu);
-       PSCB(vcpu,ifs) = val;
-       return IA64_NO_FAULT;
-}
-
-IA64FAULT vcpu_set_iim(VCPU *vcpu, UINT64 val)
-{
-       PSCB(vcpu,iim) = val;
-       return IA64_NO_FAULT;
-}
-
-IA64FAULT vcpu_set_iha(VCPU *vcpu, UINT64 val)
-{
-       PSCB(vcpu,iha) = val;
+       PSCB(vcpu, ifs) = val;
+       return IA64_NO_FAULT;
+}
+
+IA64FAULT vcpu_set_iim(VCPU * vcpu, u64 val)
+{
+       PSCB(vcpu, iim) = val;
+       return IA64_NO_FAULT;
+}
+
+IA64FAULT vcpu_set_iha(VCPU * vcpu, u64 val)
+{
+       PSCB(vcpu, iha) = val;
        return IA64_NO_FAULT;
 }
 
@@ -652,12 +742,12 @@ IA64FAULT vcpu_set_iha(VCPU *vcpu, UINT6
  VCPU interrupt control register access routines
 **************************************************************************/
 
-void vcpu_pend_unspecified_interrupt(VCPU *vcpu)
-{
-       PSCB(vcpu,pending_interruption) = 1;
-}
-
-void vcpu_pend_interrupt(VCPU *vcpu, UINT64 vector)
+void vcpu_pend_unspecified_interrupt(VCPU * vcpu)
+{
+       PSCB(vcpu, pending_interruption) = 1;
+}
+
+void vcpu_pend_interrupt(VCPU * vcpu, u64 vector)
 {
        if (vector & ~0xff) {
                printf("vcpu_pend_interrupt: bad vector\n");
@@ -665,15 +755,16 @@ void vcpu_pend_interrupt(VCPU *vcpu, UIN
        }
 
        if (vcpu->arch.event_callback_ip) {
-               printf("Deprecated interface. Move to new event based 
solution\n");
+               printf("Deprecated interface. Move to new event based "
+                      "solution\n");
                return;
        }
-               
-       if ( VMX_DOMAIN(vcpu) ) {
-               set_bit(vector,VCPU(vcpu,irr));
+
+       if (VMX_DOMAIN(vcpu)) {
+               set_bit(vector, VCPU(vcpu, irr));
        } else {
-               set_bit(vector,PSCBX(vcpu,irr));
-               PSCB(vcpu,pending_interruption) = 1;
+               set_bit(vector, PSCBX(vcpu, irr));
+               PSCB(vcpu, pending_interruption) = 1;
        }
 }
 
@@ -686,9 +777,9 @@ void vcpu_pend_interrupt(VCPU *vcpu, UIN
  * semantics of "mov rx=cr.ivr" ignore the setting of the psr.i bit,
  * this routine also ignores pscb.interrupt_delivery_enabled
  * and this must be checked independently; see vcpu_deliverable interrupts() */
-UINT64 vcpu_check_pending_interrupts(VCPU *vcpu)
-{
-       UINT64 *p, *r, bits, bitnum, mask, i, vector;
+u64 vcpu_check_pending_interrupts(VCPU * vcpu)
+{
+       u64 *p, *r, bits, bitnum, mask, i, vector;
 
        if (vcpu->arch.event_callback_ip)
                return SPURIOUS_VECTOR;
@@ -697,38 +788,41 @@ UINT64 vcpu_check_pending_interrupts(VCP
         * event injection without handle. Later guest may throw out
         * the event itself.
         */
-check_start:
-       if (event_pending(vcpu) && 
-               !test_bit(vcpu->domain->shared_info->arch.evtchn_vector,
-                       &PSCBX(vcpu, insvc[0])))
-       vcpu_pend_interrupt(vcpu, vcpu->domain->shared_info->arch.evtchn_vector);
-
-       p = &PSCBX(vcpu,irr[3]);
-       r = &PSCBX(vcpu,insvc[3]);
-       for (i = 3; ; p--, r--, i--) {
-               bits = *p ;
-               if (bits) break; // got a potential interrupt
+ check_start:
+       if (event_pending(vcpu) &&
+           !test_bit(vcpu->domain->shared_info->arch.evtchn_vector,
+                     &PSCBX(vcpu, insvc[0])))
+               vcpu_pend_interrupt(vcpu,
+                                   vcpu->domain->shared_info->arch.
+                                   evtchn_vector);
+
+       p = &PSCBX(vcpu, irr[3]);
+       r = &PSCBX(vcpu, insvc[3]);
+       for (i = 3 ;; p--, r--, i--) {
+               bits = *p;
+               if (bits)
+                       break;  // got a potential interrupt
                if (*r) {
                        // nothing in this word which is pending+inservice
                        // but there is one inservice which masks lower
                        return SPURIOUS_VECTOR;
                }
                if (i == 0) {
-               // checked all bits... nothing pending+inservice
+                       // checked all bits... nothing pending+inservice
                        return SPURIOUS_VECTOR;
                }
        }
        // have a pending,deliverable interrupt... see if it is masked
        bitnum = ia64_fls(bits);
 //printf("XXXXXXX vcpu_check_pending_interrupts: got bitnum=%p...\n",bitnum);
-       vector = bitnum+(i*64);
+       vector = bitnum + (i * 64);
        mask = 1L << bitnum;
        /* sanity check for guest timer interrupt */
-       if (vector == (PSCB(vcpu,itv) & 0xff)) {
+       if (vector == (PSCB(vcpu, itv) & 0xff)) {
                uint64_t now = ia64_get_itc();
-               if (now < PSCBX(vcpu,domain_itm)) {
+               if (now < PSCBX(vcpu, domain_itm)) {
 //                     printk("Ooops, pending guest timer before its due\n");
-                       PSCBX(vcpu,irr[i]) &= ~mask;
+                       PSCBX(vcpu, irr[i]) &= ~mask;
                        goto check_start;
                }
        }
@@ -738,48 +832,47 @@ check_start:
 //printf("but masked by equal inservice\n");
                return SPURIOUS_VECTOR;
        }
-       if (PSCB(vcpu,tpr) & IA64_TPR_MMI) {
+       if (PSCB(vcpu, tpr) & IA64_TPR_MMI) {
                // tpr.mmi is set
 //printf("but masked by tpr.mmi\n");
                return SPURIOUS_VECTOR;
        }
-       if (((PSCB(vcpu,tpr) & IA64_TPR_MIC) + 15) >= vector) {
+       if (((PSCB(vcpu, tpr) & IA64_TPR_MIC) + 15) >= vector) {
                //tpr.mic masks class
 //printf("but masked by tpr.mic\n");
                return SPURIOUS_VECTOR;
        }
-
 //printf("returned to caller\n");
        return vector;
 }
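
The scan above is the heart of interrupt delivery: walk irr[3]..irr[0]
(highest vectors first); the first nonzero word yields
vector = i*64 + fls(bits), while a nonzero insvc word met before any pending
bit masks everything below it. A self-contained model (TPR masking omitted;
the SPURIOUS_VECTOR value is an assumption):

#include <stdio.h>
#include <stdint.h>

#define SPURIOUS_VECTOR 0xf     /* assumed value, for illustration */

static int fls64(uint64_t x)    /* highest set bit; caller ensures x != 0 */
{
    int n = 63;
    while (!(x & (1UL << n)))
        n--;
    return n;
}

/* Model of the irr/insvc walk in vcpu_check_pending_interrupts(). */
static uint64_t check_pending(const uint64_t irr[4], const uint64_t insvc[4])
{
    int i;

    for (i = 3; i >= 0; i--) {
        if (irr[i])
            return i * 64 + fls64(irr[i]);  /* pending, deliverable */
        if (insvc[i])
            return SPURIOUS_VECTOR;         /* in-service masks lower */
    }
    return SPURIOUS_VECTOR;
}

int main(void)
{
    uint64_t irr[4] = { 0, 1UL << 3, 0, 0 };    /* vector 67 pending */
    uint64_t insvc[4] = { 0, 0, 0, 0 };

    printf("vector = %lu\n", (unsigned long)check_pending(irr, insvc));
    return 0;
}
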
 
-UINT64 vcpu_deliverable_interrupts(VCPU *vcpu)
+u64 vcpu_deliverable_interrupts(VCPU * vcpu)
 {
        return (vcpu_get_psr_i(vcpu) &&
                vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR);
 }
 
-UINT64 vcpu_deliverable_timer(VCPU *vcpu)
+u64 vcpu_deliverable_timer(VCPU * vcpu)
 {
        return (vcpu_get_psr_i(vcpu) &&
-               vcpu_check_pending_interrupts(vcpu) == PSCB(vcpu,itv));
-}
-
-IA64FAULT vcpu_get_lid(VCPU *vcpu, UINT64 *pval)
+               vcpu_check_pending_interrupts(vcpu) == PSCB(vcpu, itv));
+}
+
+IA64FAULT vcpu_get_lid(VCPU * vcpu, u64 * pval)
 {
        /* Use EID=0, ID=vcpu_id.  */
        *pval = vcpu->vcpu_id << 24;
        return IA64_NO_FAULT;
 }
 
-IA64FAULT vcpu_get_ivr(VCPU *vcpu, UINT64 *pval)
+IA64FAULT vcpu_get_ivr(VCPU * vcpu, u64 * pval)
 {
        int i;
-       UINT64 vector, mask;
+       u64 vector, mask;
 
 #define HEARTBEAT_FREQ 16      // period in seconds
 #ifdef HEARTBEAT_FREQ
-#define N_DOMS 16      // period in seconds
+#define N_DOMS 16              // number of domains
 #if 0
        static long count[N_DOMS] = { 0 };
 #endif
@@ -791,257 +884,269 @@ IA64FAULT vcpu_get_ivr(VCPU *vcpu, UINT6
        static char firsttime[256];
        if (firstivr) {
                int i;
-               for (i=0;i<256;i++) firsttime[i]=1;
-               firstivr=0;
+               for (i = 0; i < 256; i++)
+                       firsttime[i] = 1;
+               firstivr = 0;
        }
 #endif
 
        vector = vcpu_check_pending_interrupts(vcpu);
        if (vector == SPURIOUS_VECTOR) {
-               PSCB(vcpu,pending_interruption) = 0;
+               PSCB(vcpu, pending_interruption) = 0;
                *pval = vector;
                return IA64_NO_FAULT;
        }
 #ifdef HEARTBEAT_FREQ
-       if (domid >= N_DOMS) domid = N_DOMS-1;
+       if (domid >= N_DOMS)
+               domid = N_DOMS - 1;
 #if 0
-       if (vector == (PSCB(vcpu,itv) & 0xff)) {
-           if (!(++count[domid] & ((HEARTBEAT_FREQ*1024)-1))) {
-               printf("Dom%d heartbeat... ticks=%lx,nonticks=%lx\n",
-                       domid, count[domid], nonclockcount[domid]);
-               //count[domid] = 0;
-               //dump_runq();
-           }
+       if (vector == (PSCB(vcpu, itv) & 0xff)) {
+               if (!(++count[domid] & ((HEARTBEAT_FREQ * 1024) - 1))) {
+                       printf("Dom%d heartbeat... ticks=%lx,nonticks=%lx\n",
+                              domid, count[domid], nonclockcount[domid]);
+                       //count[domid] = 0;
+                       //dump_runq();
+               }
        }
 #endif
-       else nonclockcount[domid]++;
+       else
+               nonclockcount[domid]++;
 #endif
        // now have an unmasked, pending, deliverable vector!
        // getting ivr has "side effects"
 #ifdef IRQ_DEBUG
        if (firsttime[vector]) {
                printf("*** First get_ivr on vector=%lu,itc=%lx\n",
-                       vector,ia64_get_itc());
-               firsttime[vector]=0;
+                      vector, ia64_get_itc());
+               firsttime[vector] = 0;
        }
 #endif
        /* if delivering a timer interrupt, remember domain_itm, which
         * needs to be done before clearing irr
         */
-       if (vector == (PSCB(vcpu,itv) & 0xff)) {
-               PSCBX(vcpu,domain_itm_last) = PSCBX(vcpu,domain_itm);
+       if (vector == (PSCB(vcpu, itv) & 0xff)) {
+               PSCBX(vcpu, domain_itm_last) = PSCBX(vcpu, domain_itm);
        }
 
        i = vector >> 6;
        mask = 1L << (vector & 0x3f);
 //printf("ZZZZZZ vcpu_get_ivr: setting insvc mask for vector %lu\n",vector);
-       PSCBX(vcpu,insvc[i]) |= mask;
-       PSCBX(vcpu,irr[i]) &= ~mask;
+       PSCBX(vcpu, insvc[i]) |= mask;
+       PSCBX(vcpu, irr[i]) &= ~mask;
        //PSCB(vcpu,pending_interruption)--;
        *pval = vector;
        return IA64_NO_FAULT;
 }
 
-IA64FAULT vcpu_get_tpr(VCPU *vcpu, UINT64 *pval)
-{
-       *pval = PSCB(vcpu,tpr);
-       return (IA64_NO_FAULT);
-}
-
-IA64FAULT vcpu_get_eoi(VCPU *vcpu, UINT64 *pval)
-{
-       *pval = 0L;  // reads of eoi always return 0
-       return (IA64_NO_FAULT);
-}
-
-IA64FAULT vcpu_get_irr0(VCPU *vcpu, UINT64 *pval)
+IA64FAULT vcpu_get_tpr(VCPU * vcpu, u64 * pval)
+{
+       *pval = PSCB(vcpu, tpr);
+       return IA64_NO_FAULT;
+}
+
+IA64FAULT vcpu_get_eoi(VCPU * vcpu, u64 * pval)
+{
+       *pval = 0L;             // reads of eoi always return 0
+       return IA64_NO_FAULT;
+}
+
+IA64FAULT vcpu_get_irr0(VCPU * vcpu, u64 * pval)
 {
        *pval = PSCBX(vcpu, irr[0]);
-       return (IA64_NO_FAULT);
-}
-
-IA64FAULT vcpu_get_irr1(VCPU *vcpu, UINT64 *pval)
+       return IA64_NO_FAULT;
+}
+
+IA64FAULT vcpu_get_irr1(VCPU * vcpu, u64 * pval)
 {
        *pval = PSCBX(vcpu, irr[1]);
-       return (IA64_NO_FAULT);
-}
-
-IA64FAULT vcpu_get_irr2(VCPU *vcpu, UINT64 *pval)
+       return IA64_NO_FAULT;
+}
+
+IA64FAULT vcpu_get_irr2(VCPU * vcpu, u64 * pval)
 {
        *pval = PSCBX(vcpu, irr[2]);
-       return (IA64_NO_FAULT);
-}
-
-IA64FAULT vcpu_get_irr3(VCPU *vcpu, UINT64 *pval)
+       return IA64_NO_FAULT;
+}
+
+IA64FAULT vcpu_get_irr3(VCPU * vcpu, u64 * pval)
 {
        *pval = PSCBX(vcpu, irr[3]);
-       return (IA64_NO_FAULT);
-}
-
-IA64FAULT vcpu_get_itv(VCPU *vcpu, UINT64 *pval)
-{
-       *pval = PSCB(vcpu,itv);
-       return (IA64_NO_FAULT);
-}
-
-IA64FAULT vcpu_get_pmv(VCPU *vcpu, UINT64 *pval)
-{
-       *pval = PSCB(vcpu,pmv);
-       return (IA64_NO_FAULT);
-}
-
-IA64FAULT vcpu_get_cmcv(VCPU *vcpu, UINT64 *pval)
-{
-       *pval = PSCB(vcpu,cmcv);
-       return (IA64_NO_FAULT);
-}
-
-IA64FAULT vcpu_get_lrr0(VCPU *vcpu, UINT64 *pval)
+       return IA64_NO_FAULT;
+}
+
+IA64FAULT vcpu_get_itv(VCPU * vcpu, u64 * pval)
+{
+       *pval = PSCB(vcpu, itv);
+       return IA64_NO_FAULT;
+}
+
+IA64FAULT vcpu_get_pmv(VCPU * vcpu, u64 * pval)
+{
+       *pval = PSCB(vcpu, pmv);
+       return IA64_NO_FAULT;
+}
+
+IA64FAULT vcpu_get_cmcv(VCPU * vcpu, u64 * pval)
+{
+       *pval = PSCB(vcpu, cmcv);
+       return IA64_NO_FAULT;
+}
+
+IA64FAULT vcpu_get_lrr0(VCPU * vcpu, u64 * pval)
 {
        // fix this when setting values other than m-bit is supported
        printf("vcpu_get_lrr0: Unmasked interrupts unsupported\n");
        *pval = (1L << 16);
-       return (IA64_NO_FAULT);
-}
-
-IA64FAULT vcpu_get_lrr1(VCPU *vcpu, UINT64 *pval)
+       return IA64_NO_FAULT;
+}
+
+IA64FAULT vcpu_get_lrr1(VCPU * vcpu, u64 * pval)
 {
        // fix this when setting values other than m-bit is supported
        printf("vcpu_get_lrr1: Unmasked interrupts unsupported\n");
        *pval = (1L << 16);
-       return (IA64_NO_FAULT);
-}
-
-IA64FAULT vcpu_set_lid(VCPU *vcpu, UINT64 val)
+       return IA64_NO_FAULT;
+}
+
+IA64FAULT vcpu_set_lid(VCPU * vcpu, u64 val)
 {
        printf("vcpu_set_lid: Setting cr.lid is unsupported\n");
-       return (IA64_ILLOP_FAULT);
-}
-
-IA64FAULT vcpu_set_tpr(VCPU *vcpu, UINT64 val)
-{
-       if (val & 0xff00) return IA64_RSVDREG_FAULT;
-       PSCB(vcpu,tpr) = val;
+       return IA64_ILLOP_FAULT;
+}
+
+IA64FAULT vcpu_set_tpr(VCPU * vcpu, u64 val)
+{
+       if (val & 0xff00)
+               return IA64_RSVDREG_FAULT;
+       PSCB(vcpu, tpr) = val;
        /* This can unmask interrupts.  */
        if (vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR)
-               PSCB(vcpu,pending_interruption) = 1;
-       return (IA64_NO_FAULT);
-}
-
-IA64FAULT vcpu_set_eoi(VCPU *vcpu, UINT64 val)
-{
-       UINT64 *p, bits, vec, bitnum;
+               PSCB(vcpu, pending_interruption) = 1;
+       return IA64_NO_FAULT;
+}
+
+IA64FAULT vcpu_set_eoi(VCPU * vcpu, u64 val)
+{
+       u64 *p, bits, vec, bitnum;
        int i;
 
-       p = &PSCBX(vcpu,insvc[3]);
-       for (i = 3; (i >= 0) && !(bits = *p); i--, p--);
+       p = &PSCBX(vcpu, insvc[3]);
+       for (i = 3; (i >= 0) && !(bits = *p); i--, p--)
+               ;
        if (i < 0) {
                printf("Trying to EOI interrupt when none are in-service.\n");
                return IA64_NO_FAULT;
        }
        bitnum = ia64_fls(bits);
-       vec = bitnum + (i*64);
+       vec = bitnum + (i * 64);
        /* clear the correct bit */
        bits &= ~(1L << bitnum);
        *p = bits;
        /* clearing an eoi bit may unmask another pending interrupt... */
-       if (!vcpu->vcpu_info->evtchn_upcall_mask) { // but only if enabled...
+       if (!vcpu->vcpu_info->evtchn_upcall_mask) {     // but only if 
enabled...
                // worry about this later... Linux only calls eoi
                // with interrupts disabled
                printf("Trying to EOI interrupt with interrupts enabled\n");
        }
        if (vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR)
-               PSCB(vcpu,pending_interruption) = 1;
+               PSCB(vcpu, pending_interruption) = 1;
 //printf("YYYYY vcpu_set_eoi: Successful\n");
-       return (IA64_NO_FAULT);
-}
-
-IA64FAULT vcpu_set_lrr0(VCPU *vcpu, UINT64 val)
+       return IA64_NO_FAULT;
+}
+
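
Taken together, vcpu_get_ivr() above and vcpu_set_eoi() form a pair: reading
ivr moves the delivered vector's bit from irr[] to insvc[], and eoi clears
the highest in-service bit, possibly unmasking the next pending interrupt. A
self-contained model, with plain arrays standing in for
PSCBX(vcpu, irr/insvc):

#include <stdio.h>
#include <stdint.h>

static uint64_t irr[4], insvc[4];

static void deliver(unsigned int vector)    /* get_ivr side effect */
{
    unsigned int i = vector >> 6;
    uint64_t mask = 1UL << (vector & 0x3f);

    insvc[i] |= mask;   /* now in-service... */
    irr[i] &= ~mask;    /* ...and no longer pending */
}

static void eoi(void)                       /* set_eoi side effect */
{
    int i, bit;

    for (i = 3; i >= 0; i--)
        if (insvc[i]) {
            for (bit = 63; !(insvc[i] & (1UL << bit)); bit--)
                ;
            insvc[i] &= ~(1UL << bit);      /* clear highest in-service */
            printf("EOI vector %d\n", i * 64 + bit);
            return;
        }
    printf("EOI with nothing in-service\n");
}

int main(void)
{
    irr[1] = 1UL << 3;  /* vector 67 pending */
    deliver(67);
    eoi();
    return 0;
}
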
+IA64FAULT vcpu_set_lrr0(VCPU * vcpu, u64 val)
 {
        if (!(val & (1L << 16))) {
                printf("vcpu_set_lrr0: Unmasked interrupts unsupported\n");
-               return (IA64_ILLOP_FAULT);
+               return IA64_ILLOP_FAULT;
        }
        // no place to save this state but nothing to do anyway
-       return (IA64_NO_FAULT);
-}
-
-IA64FAULT vcpu_set_lrr1(VCPU *vcpu, UINT64 val)
+       return IA64_NO_FAULT;
+}
+
+IA64FAULT vcpu_set_lrr1(VCPU * vcpu, u64 val)
 {
        if (!(val & (1L << 16))) {
                printf("vcpu_set_lrr0: Unmasked interrupts unsupported\n");
-               return (IA64_ILLOP_FAULT);
+               return IA64_ILLOP_FAULT;
        }
        // no place to save this state but nothing to do anyway
-       return (IA64_NO_FAULT);
-}
-
-IA64FAULT vcpu_set_itv(VCPU *vcpu, UINT64 val)
+       return IA64_NO_FAULT;
+}
+
+IA64FAULT vcpu_set_itv(VCPU * vcpu, u64 val)
 {
        /* Check reserved fields.  */
        if (val & 0xef00)
-               return (IA64_ILLOP_FAULT);
-       PSCB(vcpu,itv) = val;
+               return IA64_ILLOP_FAULT;
+       PSCB(vcpu, itv) = val;
        if (val & 0x10000) {
                /* Disable itm.  */
-               PSCBX(vcpu,domain_itm) = 0;
-       }
-       else vcpu_set_next_timer(vcpu);
-       return (IA64_NO_FAULT);
-}
-
-IA64FAULT vcpu_set_pmv(VCPU *vcpu, UINT64 val)
-{
-       if (val & 0xef00) /* reserved fields */ return IA64_RSVDREG_FAULT;
-       PSCB(vcpu,pmv) = val;
-       return (IA64_NO_FAULT);
-}
-
-IA64FAULT vcpu_set_cmcv(VCPU *vcpu, UINT64 val)
-{
-       if (val & 0xef00) /* reserved fields */ return IA64_RSVDREG_FAULT;
-       PSCB(vcpu,cmcv) = val;
-       return (IA64_NO_FAULT);
+               PSCBX(vcpu, domain_itm) = 0;
+       } else
+               vcpu_set_next_timer(vcpu);
+       return IA64_NO_FAULT;
+}
+
+IA64FAULT vcpu_set_pmv(VCPU * vcpu, u64 val)
+{
+       if (val & 0xef00)       /* reserved fields */
+               return IA64_RSVDREG_FAULT;
+       PSCB(vcpu, pmv) = val;
+       return IA64_NO_FAULT;
+}
+
+IA64FAULT vcpu_set_cmcv(VCPU * vcpu, u64 val)
+{
+       if (val & 0xef00)       /* reserved fields */
+               return IA64_RSVDREG_FAULT;
+       PSCB(vcpu, cmcv) = val;
+       return IA64_NO_FAULT;
 }
 
 /**************************************************************************
  VCPU temporary register access routines
 **************************************************************************/
-UINT64 vcpu_get_tmp(VCPU *vcpu, UINT64 index)
-{
-       if (index > 7) return 0;
-       return PSCB(vcpu,tmp[index]);
-}
-
-void vcpu_set_tmp(VCPU *vcpu, UINT64 index, UINT64 val)
-{
-       if (index <= 7) PSCB(vcpu,tmp[index]) = val;
+u64 vcpu_get_tmp(VCPU * vcpu, u64 index)
+{
+       if (index > 7)
+               return 0;
+       return PSCB(vcpu, tmp[index]);
+}
+
+void vcpu_set_tmp(VCPU * vcpu, u64 index, u64 val)
+{
+       if (index <= 7)
+               PSCB(vcpu, tmp[index]) = val;
 }
 
 /**************************************************************************
 Interval timer routines
 **************************************************************************/
 
-BOOLEAN vcpu_timer_disabled(VCPU *vcpu)
-{
-       UINT64 itv = PSCB(vcpu,itv);
-       return(!itv || !!(itv & 0x10000));
-}
-
-BOOLEAN vcpu_timer_inservice(VCPU *vcpu)
-{
-       UINT64 itv = PSCB(vcpu,itv);
-       return (test_bit(itv, PSCBX(vcpu,insvc)));
-}
-
-BOOLEAN vcpu_timer_expired(VCPU *vcpu)
-{
-       unsigned long domain_itm = PSCBX(vcpu,domain_itm);
+BOOLEAN vcpu_timer_disabled(VCPU * vcpu)
+{
+       u64 itv = PSCB(vcpu, itv);
+       return (!itv || !!(itv & 0x10000));
+}
+
+BOOLEAN vcpu_timer_inservice(VCPU * vcpu)
+{
+       u64 itv = PSCB(vcpu, itv);
+       return test_bit(itv, PSCBX(vcpu, insvc));
+}
+
+BOOLEAN vcpu_timer_expired(VCPU * vcpu)
+{
+       unsigned long domain_itm = PSCBX(vcpu, domain_itm);
        unsigned long now = ia64_get_itc();
 
-       if (!domain_itm) return FALSE;
-       if (now < domain_itm) return FALSE;
-       if (vcpu_timer_disabled(vcpu)) return FALSE;
+       if (!domain_itm)
+               return FALSE;
+       if (now < domain_itm)
+               return FALSE;
+       if (vcpu_timer_disabled(vcpu))
+               return FALSE;
        return TRUE;
 }
 
@@ -1049,25 +1154,26 @@ void vcpu_safe_set_itm(unsigned long val
 {
        unsigned long epsilon = 100;
        unsigned long flags;
-       UINT64 now = ia64_get_itc();
+       u64 now = ia64_get_itc();
 
        local_irq_save(flags);
        while (1) {
 //printf("*** vcpu_safe_set_itm: Setting itm to %lx, itc=%lx\n",val,now);
                ia64_set_itm(val);
-               if (val > (now = ia64_get_itc())) break;
+               if (val > (now = ia64_get_itc()))
+                       break;
                val = now + epsilon;
                epsilon <<= 1;
        }
        local_irq_restore(flags);
 }
 
-void vcpu_set_next_timer(VCPU *vcpu)
-{
-       UINT64 d = PSCBX(vcpu,domain_itm);
-       //UINT64 s = PSCBX(vcpu,xen_itm);
-       UINT64 s = local_cpu_data->itm_next;
-       UINT64 now = ia64_get_itc();
+void vcpu_set_next_timer(VCPU * vcpu)
+{
+       u64 d = PSCBX(vcpu, domain_itm);
+       //u64 s = PSCBX(vcpu,xen_itm);
+       u64 s = local_cpu_data->itm_next;
+       u64 now = ia64_get_itc();
 
        /* gloss over the wraparound problem for now... we know it exists
         * but it doesn't matter right now */
@@ -1081,25 +1187,24 @@ void vcpu_set_next_timer(VCPU *vcpu)
        if (d && (d > now) && (d < s)) {
                vcpu_safe_set_itm(d);
                //using_domain_as_itm++;
-       }
-       else {
+       } else {
                vcpu_safe_set_itm(s);
                //using_xen_as_itm++;
        }
 }
 
-IA64FAULT vcpu_set_itm(VCPU *vcpu, UINT64 val)
+IA64FAULT vcpu_set_itm(VCPU * vcpu, u64 val)
 {
        //UINT now = ia64_get_itc();
 
        //if (val < now) val = now + 1000;
 //printf("*** vcpu_set_itm: called with %lx\n",val);
-       PSCBX(vcpu,domain_itm) = val;
+       PSCBX(vcpu, domain_itm) = val;
        vcpu_set_next_timer(vcpu);
-       return (IA64_NO_FAULT);
-}
-
-IA64FAULT vcpu_set_itc(VCPU *vcpu, UINT64 val)
+       return IA64_NO_FAULT;
+}
+
+IA64FAULT vcpu_set_itc(VCPU * vcpu, u64 val)
 {
 #define DISALLOW_SETTING_ITC_FOR_NOW
 #ifdef DISALLOW_SETTING_ITC_FOR_NOW
@@ -1110,58 +1215,59 @@ IA64FAULT vcpu_set_itc(VCPU *vcpu, UINT6
                did_print = 1;
        }
 #else
-       UINT64 oldnow = ia64_get_itc();
-       UINT64 olditm = PSCBX(vcpu,domain_itm);
+       u64 oldnow = ia64_get_itc();
+       u64 olditm = PSCBX(vcpu, domain_itm);
        unsigned long d = olditm - oldnow;
        unsigned long x = local_cpu_data->itm_next - oldnow;
 
-       UINT64 newnow = val, min_delta;
+       u64 newnow = val, min_delta;
 
        local_irq_disable();
        if (olditm) {
-printf("**** vcpu_set_itc(%lx): vitm changed to %lx\n",val,newnow+d);
-               PSCBX(vcpu,domain_itm) = newnow + d;
+               printf("**** vcpu_set_itc(%lx): vitm changed to %lx\n", val,
+                      newnow + d);
+               PSCBX(vcpu, domain_itm) = newnow + d;
        }
        local_cpu_data->itm_next = newnow + x;
-       d = PSCBX(vcpu,domain_itm);
+       d = PSCBX(vcpu, domain_itm);
        x = local_cpu_data->itm_next;
 
        ia64_set_itc(newnow);
        if (d && (d > newnow) && (d < x)) {
                vcpu_safe_set_itm(d);
                //using_domain_as_itm++;
-       }
-       else {
+       } else {
                vcpu_safe_set_itm(x);
                //using_xen_as_itm++;
        }
        local_irq_enable();
 #endif
-       return (IA64_NO_FAULT);
-}
-
-IA64FAULT vcpu_get_itm(VCPU *vcpu, UINT64 *pval)
+       return IA64_NO_FAULT;
+}
+
+IA64FAULT vcpu_get_itm(VCPU * vcpu, u64 * pval)
 {
        //FIXME: Implement this
        printf("vcpu_get_itm: Getting cr.itm is unsupported... continuing\n");
-       return (IA64_NO_FAULT);
-       //return (IA64_ILLOP_FAULT);
-}
-
-IA64FAULT vcpu_get_itc(VCPU *vcpu, UINT64 *pval)
+       return IA64_NO_FAULT;
+       //return IA64_ILLOP_FAULT;
+}
+
+IA64FAULT vcpu_get_itc(VCPU * vcpu, u64 * pval)
 {
        //TODO: Implement this
        printf("vcpu_get_itc: Getting ar.itc is unsupported\n");
-       return (IA64_ILLOP_FAULT);
-}
-
-void vcpu_pend_timer(VCPU *vcpu)
-{
-       UINT64 itv = PSCB(vcpu,itv) & 0xff;
-
-       if (vcpu_timer_disabled(vcpu)) return;
+       return IA64_ILLOP_FAULT;
+}
+
+void vcpu_pend_timer(VCPU * vcpu)
+{
+       u64 itv = PSCB(vcpu, itv) & 0xff;
+
+       if (vcpu_timer_disabled(vcpu))
+               return;
        //if (vcpu_timer_inservice(vcpu)) return;
-       if (PSCBX(vcpu,domain_itm_last) == PSCBX(vcpu,domain_itm)) {
+       if (PSCBX(vcpu, domain_itm_last) == PSCBX(vcpu, domain_itm)) {
                // already delivered an interrupt for this so
                // don't deliver another
                return;
@@ -1179,13 +1285,15 @@ void vcpu_pend_timer(VCPU *vcpu)
 }
 
 // returns true if ready to deliver a timer interrupt too early
-UINT64 vcpu_timer_pending_early(VCPU *vcpu)
-{
-       UINT64 now = ia64_get_itc();
-       UINT64 itm = PSCBX(vcpu,domain_itm);
-
-       if (vcpu_timer_disabled(vcpu)) return 0;
-       if (!itm) return 0;
+u64 vcpu_timer_pending_early(VCPU * vcpu)
+{
+       u64 now = ia64_get_itc();
+       u64 itm = PSCBX(vcpu, domain_itm);
+
+       if (vcpu_timer_disabled(vcpu))
+               return 0;
+       if (!itm)
+               return 0;
        return (vcpu_deliverable_timer(vcpu) && (now < itm));
 }
 
@@ -1193,120 +1301,129 @@ Privileged operation emulation routines
 Privileged operation emulation routines
 **************************************************************************/
 
-static void
-vcpu_force_tlb_miss(VCPU* vcpu, UINT64 ifa)
+static void vcpu_force_tlb_miss(VCPU * vcpu, u64 ifa)
 {
        PSCB(vcpu, ifa) = ifa;
        PSCB(vcpu, itir) = vcpu_get_itir_on_fault(vcpu, ifa);
        vcpu_thash(current, ifa, &PSCB(current, iha));
 }
 
-IA64FAULT vcpu_force_inst_miss(VCPU *vcpu, UINT64 ifa)
+IA64FAULT vcpu_force_inst_miss(VCPU * vcpu, u64 ifa)
 {
        vcpu_force_tlb_miss(vcpu, ifa);
-       return (vcpu_get_rr_ve(vcpu, ifa)? IA64_INST_TLB_VECTOR: IA64_ALT_INST_TLB_VECTOR);
-}
-
-IA64FAULT vcpu_force_data_miss(VCPU *vcpu, UINT64 ifa)
+       return vcpu_get_rr_ve(vcpu, ifa) ? IA64_INST_TLB_VECTOR :
+               IA64_ALT_INST_TLB_VECTOR;
+}
+
+IA64FAULT vcpu_force_data_miss(VCPU * vcpu, u64 ifa)
 {
        vcpu_force_tlb_miss(vcpu, ifa);
-       return (vcpu_get_rr_ve(vcpu, ifa)? IA64_DATA_TLB_VECTOR: IA64_ALT_DATA_TLB_VECTOR);
-}
-
-IA64FAULT vcpu_rfi(VCPU *vcpu)
+       return vcpu_get_rr_ve(vcpu, ifa) ? IA64_DATA_TLB_VECTOR :
+               IA64_ALT_DATA_TLB_VECTOR;
+}
+
+IA64FAULT vcpu_rfi(VCPU * vcpu)
 {
        // TODO: Only allowed for current vcpu
        PSR psr;
-       UINT64 int_enable, regspsr = 0;
-       UINT64 ifs;
+       u64 int_enable, regspsr = 0;
+       u64 ifs;
        REGS *regs = vcpu_regs(vcpu);
        extern void dorfirfi(void);
 
-       psr.i64 = PSCB(vcpu,ipsr);
-       if (psr.ia64_psr.cpl < 3) psr.ia64_psr.cpl = 2;
+       psr.i64 = PSCB(vcpu, ipsr);
+       if (psr.ia64_psr.cpl < 3)
+               psr.ia64_psr.cpl = 2;
        int_enable = psr.ia64_psr.i;
-       if (psr.ia64_psr.ic)  PSCB(vcpu,interrupt_collection_enabled) = 1;
-       if (psr.ia64_psr.dt && psr.ia64_psr.rt && psr.ia64_psr.it) 
vcpu_set_metaphysical_mode(vcpu,FALSE);
-       else vcpu_set_metaphysical_mode(vcpu,TRUE);
-       psr.ia64_psr.ic = 1; psr.ia64_psr.i = 1;
-       psr.ia64_psr.dt = 1; psr.ia64_psr.rt = 1; psr.ia64_psr.it = 1;
+       if (psr.ia64_psr.ic)
+               PSCB(vcpu, interrupt_collection_enabled) = 1;
+       if (psr.ia64_psr.dt && psr.ia64_psr.rt && psr.ia64_psr.it)
+               vcpu_set_metaphysical_mode(vcpu, FALSE);
+       else
+               vcpu_set_metaphysical_mode(vcpu, TRUE);
+       psr.ia64_psr.ic = 1;
+       psr.ia64_psr.i = 1;
+       psr.ia64_psr.dt = 1;
+       psr.ia64_psr.rt = 1;
+       psr.ia64_psr.it = 1;
        psr.ia64_psr.bn = 1;
        //psr.pk = 1;  // checking pkeys shouldn't be a problem but seems broken
        if (psr.ia64_psr.be) {
                printf("*** DOMAIN TRYING TO TURN ON BIG-ENDIAN!!!\n");
-               return (IA64_ILLOP_FAULT);
-       }
-       PSCB(vcpu,incomplete_regframe) = 0; // is this necessary?
-       ifs = PSCB(vcpu,ifs);
+               return IA64_ILLOP_FAULT;
+       }
+       PSCB(vcpu, incomplete_regframe) = 0;    // is this necessary?
+       ifs = PSCB(vcpu, ifs);
        //if ((ifs & regs->cr_ifs & 0x8000000000000000L) && ifs != 
regs->cr_ifs) {
        //if ((ifs & 0x8000000000000000L) && ifs != regs->cr_ifs) {
        if (ifs & regs->cr_ifs & 0x8000000000000000L) {
                // TODO: validate PSCB(vcpu,iip)
                // TODO: PSCB(vcpu,ipsr) = psr;
-               PSCB(vcpu,ipsr) = psr.i64;
+               PSCB(vcpu, ipsr) = psr.i64;
                // now set up the trampoline
                regs->cr_iip = *(unsigned long *)dorfirfi; // function pointer!!
-               __asm__ __volatile ("mov %0=psr;;":"=r"(regspsr)::"memory");
-               regs->cr_ipsr = regspsr & ~(IA64_PSR_I | IA64_PSR_IC | 
IA64_PSR_BN);
-       }
-       else {
+               __asm__ __volatile("mov %0=psr;;":"=r"(regspsr)::"memory");
+               regs->cr_ipsr =
+                   regspsr & ~(IA64_PSR_I | IA64_PSR_IC | IA64_PSR_BN);
+       } else {
                regs->cr_ipsr = psr.i64;
-               regs->cr_iip = PSCB(vcpu,iip);
-       }
-       PSCB(vcpu,interrupt_collection_enabled) = 1;
+               regs->cr_iip = PSCB(vcpu, iip);
+       }
+       PSCB(vcpu, interrupt_collection_enabled) = 1;
        vcpu_bsw1(vcpu);
        vcpu->vcpu_info->evtchn_upcall_mask = !int_enable;
-       return (IA64_NO_FAULT);
-}
-
-IA64FAULT vcpu_cover(VCPU *vcpu)
+       return IA64_NO_FAULT;
+}
+
+IA64FAULT vcpu_cover(VCPU * vcpu)
 {
        // TODO: Only allowed for current vcpu
        REGS *regs = vcpu_regs(vcpu);
 
-       if (!PSCB(vcpu,interrupt_collection_enabled)) {
-               if (!PSCB(vcpu,incomplete_regframe))
-                       PSCB(vcpu,ifs) = regs->cr_ifs;
-               else PSCB(vcpu,incomplete_regframe) = 0;
+       if (!PSCB(vcpu, interrupt_collection_enabled)) {
+               if (!PSCB(vcpu, incomplete_regframe))
+                       PSCB(vcpu, ifs) = regs->cr_ifs;
+               else
+                       PSCB(vcpu, incomplete_regframe) = 0;
        }
        regs->cr_ifs = 0;
-       return (IA64_NO_FAULT);
-}
-
-IA64FAULT vcpu_thash(VCPU *vcpu, UINT64 vadr, UINT64 *pval)
-{
-       UINT64 pta = PSCB(vcpu,pta);
-       UINT64 pta_sz = (pta & IA64_PTA_SZ(0x3f)) >> IA64_PTA_SZ_BIT;
-       UINT64 pta_base = pta & ~((1UL << IA64_PTA_BASE_BIT)-1);
-       UINT64 Mask = (1L << pta_sz) - 1;
-       UINT64 Mask_60_15 = (Mask >> 15) & 0x3fffffffffff;
-       UINT64 compMask_60_15 = ~Mask_60_15;
-       UINT64 rr_ps = vcpu_get_rr_ps(vcpu,vadr);
-       UINT64 VHPT_offset = (vadr >> rr_ps) << 3;
-       UINT64 VHPT_addr1 = vadr & 0xe000000000000000L;
-       UINT64 VHPT_addr2a =
-               ((pta_base >> 15) & 0x3fffffffffff) & compMask_60_15;
-       UINT64 VHPT_addr2b =
-               ((VHPT_offset >> 15) & 0x3fffffffffff) & Mask_60_15;
-       UINT64 VHPT_addr3 = VHPT_offset & 0x7fff;
-       UINT64 VHPT_addr = VHPT_addr1 | ((VHPT_addr2a | VHPT_addr2b) << 15) |
-                       VHPT_addr3;
+       return IA64_NO_FAULT;
+}
+
+IA64FAULT vcpu_thash(VCPU * vcpu, u64 vadr, u64 * pval)
+{
+       u64 pta = PSCB(vcpu, pta);
+       u64 pta_sz = (pta & IA64_PTA_SZ(0x3f)) >> IA64_PTA_SZ_BIT;
+       u64 pta_base = pta & ~((1UL << IA64_PTA_BASE_BIT) - 1);
+       u64 Mask = (1L << pta_sz) - 1;
+       u64 Mask_60_15 = (Mask >> 15) & 0x3fffffffffff;
+       u64 compMask_60_15 = ~Mask_60_15;
+       u64 rr_ps = vcpu_get_rr_ps(vcpu, vadr);
+       u64 VHPT_offset = (vadr >> rr_ps) << 3;
+       u64 VHPT_addr1 = vadr & 0xe000000000000000L;
+       u64 VHPT_addr2a =
+           ((pta_base >> 15) & 0x3fffffffffff) & compMask_60_15;
+       u64 VHPT_addr2b =
+           ((VHPT_offset >> 15) & 0x3fffffffffff) & Mask_60_15;
+       u64 VHPT_addr3 = VHPT_offset & 0x7fff;
+       u64 VHPT_addr = VHPT_addr1 | ((VHPT_addr2a | VHPT_addr2b) << 15) |
+           VHPT_addr3;
 
 //verbose("vcpu_thash: vadr=%p, VHPT_addr=%p\n",vadr,VHPT_addr);
        *pval = VHPT_addr;
-       return (IA64_NO_FAULT);
-}
-
-IA64FAULT vcpu_ttag(VCPU *vcpu, UINT64 vadr, UINT64 *padr)
+       return IA64_NO_FAULT;
+}
+
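/* [Editor's sketch, not part of the changeset]  An equivalent, flattened
 * form of the hash computed by vcpu_thash() above; the helper name is
 * hypothetical.  Bits 63:61 come from the faulting address, bits 60:15
 * from pta.base outside the table-size mask and from the VHPT offset
 * inside it, and bits 14:0 straight from the offset. */
static unsigned long vhpt_hash(unsigned long vadr, unsigned long pta_base,
                               unsigned long pta_sz, unsigned long rr_ps)
{
	unsigned long off  = (vadr >> rr_ps) << 3;  /* 8-byte short PTEs */
	unsigned long mask = (1UL << pta_sz) - 1;   /* table size - 1    */
	unsigned long mid  = 0x1fffffffffff8000UL;  /* bits 60:15        */

	return (vadr & 0xe000000000000000UL)        /* region bits 63:61 */
	    | (((pta_base & ~mask) | (off & mask)) & mid)
	    | (off & 0x7fff);                       /* offset bits 14:0  */
}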
+IA64FAULT vcpu_ttag(VCPU * vcpu, u64 vadr, u64 * padr)
 {
        printf("vcpu_ttag: ttag instruction unsupported\n");
-       return (IA64_ILLOP_FAULT);
-}
-
-int warn_region0_address = 0; // FIXME later: tie to a boot parameter?
+       return IA64_ILLOP_FAULT;
+}
+
+int warn_region0_address = 0;  // FIXME later: tie to a boot parameter?
 
 /* Return TRUE iff [b1,e1] and [b2,e2] partially or fully overlaps.  */
-static inline int range_overlap (u64 b1, u64 e1, u64 b2, u64 e2)
+static inline int range_overlap(u64 b1, u64 e1, u64 b2, u64 e2)
 {
        return (b1 <= e2) && (e1 >= b2);
 }
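/* [Editor's note, illustration only]  Both intervals are closed, so
 * shared endpoints count as overlap:
 *   range_overlap(0x1000, 0x1fff, 0x2000, 0x2fff) == 0  (adjacent)
 *   range_overlap(0x1000, 0x1fff, 0x1fff, 0x2fff) == 1  (share one byte)
 */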
@@ -1314,7 +1431,7 @@ static inline int range_overlap (u64 b1,
 /* Crash domain if [base, base + page_size] and Xen virtual space overlaps.
    Note: LSBs of base inside page_size are ignored.  */
 static inline void
-check_xen_space_overlap (const char *func, u64 base, u64 page_size)
+check_xen_space_overlap(const char *func, u64 base, u64 page_size)
 {
        /* Overlaps can occur only in region 7.
           (This is an optimization to bypass all the checks).  */
@@ -1325,43 +1442,42 @@ check_xen_space_overlap (const char *fun
        base &= ~(page_size - 1);
 
        /* FIXME: ideally an MCA should be generated...  */
-       if (range_overlap (HYPERVISOR_VIRT_START, HYPERVISOR_VIRT_END,
-                          base, base + page_size)
+       if (range_overlap(HYPERVISOR_VIRT_START, HYPERVISOR_VIRT_END,
+                         base, base + page_size)
            || range_overlap(current->domain->arch.shared_info_va,
-                            current->domain->arch.shared_info_va 
-                            + XSI_SIZE + XMAPPEDREGS_SIZE,
-                            base, base + page_size))
-               panic_domain (NULL, "%s on Xen virtual space (%lx)\n",
-                             func, base);
+                            current->domain->arch.shared_info_va
+                            + XSI_SIZE + XMAPPEDREGS_SIZE,
+                            base, base + page_size))
+               panic_domain(NULL, "%s on Xen virtual space (%lx)\n",
+                            func, base);
 }
 
 // FIXME: also need to check && (!trp->key || vcpu_pkr_match(trp->key))
-static inline int vcpu_match_tr_entry_no_p(TR_ENTRY *trp, UINT64 ifa, UINT64 rid)
-{
-       return trp->rid == rid 
-               && ifa >= trp->vadr
-               && ifa <= (trp->vadr + (1L << trp->ps) - 1);
-}
-
-static inline int vcpu_match_tr_entry(TR_ENTRY *trp, UINT64 ifa, UINT64 rid)
+static inline int vcpu_match_tr_entry_no_p(TR_ENTRY * trp, u64 ifa,
+                                           u64 rid)
+{
+       return trp->rid == rid
+           && ifa >= trp->vadr && ifa <= (trp->vadr + (1L << trp->ps) - 1);
+}
+
+static inline int vcpu_match_tr_entry(TR_ENTRY * trp, u64 ifa, u64 rid)
 {
        return trp->pte.p && vcpu_match_tr_entry_no_p(trp, ifa, rid);
 }
 
 static inline int
-vcpu_match_tr_entry_range(TR_ENTRY *trp, UINT64 rid, u64 b, u64 e)
+vcpu_match_tr_entry_range(TR_ENTRY * trp, u64 rid, u64 b, u64 e)
 {
        return trp->rid == rid
-               && trp->pte.p
-               && range_overlap (b, e,
-                                 trp->vadr, trp->vadr + (1L << trp->ps) - 1);
-
-}
-
-static TR_ENTRY*
-vcpu_tr_lookup(VCPU* vcpu, unsigned long va, UINT64 rid, BOOLEAN is_data)
-{
-       unsigned char* regions;
+           && trp->pte.p
+           && range_overlap(b, e, trp->vadr, trp->vadr + (1L << trp->ps) - 1);
+
+}
+
+static TR_ENTRY *vcpu_tr_lookup(VCPU * vcpu, unsigned long va, u64 rid,
+                                BOOLEAN is_data)
+{
+       unsigned char *regions;
        TR_ENTRY *trp;
        int tr_max;
        int i;
@@ -1370,12 +1486,12 @@ vcpu_tr_lookup(VCPU* vcpu, unsigned long
                // data
                regions = &vcpu->arch.dtr_regions;
                trp = vcpu->arch.dtrs;
-               tr_max = sizeof(vcpu->arch.dtrs)/sizeof(vcpu->arch.dtrs[0]);
+               tr_max = sizeof(vcpu->arch.dtrs) / sizeof(vcpu->arch.dtrs[0]);
        } else {
                // instruction
                regions = &vcpu->arch.itr_regions;
                trp = vcpu->arch.itrs;
-               tr_max = sizeof(vcpu->arch.itrs)/sizeof(vcpu->arch.itrs[0]);
+               tr_max = sizeof(vcpu->arch.itrs) / sizeof(vcpu->arch.itrs[0]);
        }
 
        if (!vcpu_quick_region_check(*regions, va)) {
@@ -1393,13 +1509,14 @@ vcpu_tr_lookup(VCPU* vcpu, unsigned long
 // 0: failure
 // 1: success
 int
-vcpu_get_domain_bundle(VCPU* vcpu, REGS* regs, UINT64 gip, IA64_BUNDLE* bundle)
-{
-       UINT64 gpip;// guest pseudo phyiscal ip
+vcpu_get_domain_bundle(VCPU * vcpu, REGS * regs, u64 gip,
+                       IA64_BUNDLE * bundle)
+{
+       u64 gpip;               // guest pseudo physical ip
        unsigned long vaddr;
-       struct page_info* page;
-
-again:
+       struct page_info *page;
+
+ again:
 #if 0
        // Currently xen doesn't track psr.it bits.
        // it assumes always psr.it = 1.
@@ -1412,7 +1529,7 @@ again:
                unsigned long rr = PSCB(vcpu, rrs)[region];
                unsigned long rid = rr & RR_RID_MASK;
                BOOLEAN swap_rr0;
-               TR_ENTRY* trp;
+               TR_ENTRY *trp;
 
                // vcpu->arch.{i, d}tlb are volatile,
                // copy its value to the variable, tr, before use.
@@ -1427,7 +1544,8 @@ again:
                // Last itc.i value is cached to PSCBX(vcpu, itlb).
                tr = PSCBX(vcpu, itlb);
                if (vcpu_match_tr_entry(&tr, gip, rid)) {
-                       //DPRINTK("%s gip 0x%lx gpip 0x%lx\n", __func__, gip, gpip);
+                       //DPRINTK("%s gip 0x%lx gpip 0x%lx\n", __func__,
+                       //      gip, gpip);
                        goto found;
                }
                trp = vcpu_tr_lookup(vcpu, gip, rid, 1);
@@ -1457,43 +1575,43 @@ again:
                        return 0;
                }
                return 1;
-        
+
        found:
                gpip = ((tr.pte.ppn >> (tr.ps - 12)) << tr.ps) |
                        (gip & ((1 << tr.ps) - 1));
        }
-       
+
        vaddr = (unsigned long)domain_mpa_to_imva(vcpu->domain, gpip);
        page = virt_to_page(vaddr);
        if (get_page(page, vcpu->domain) == 0) {
                if (page_get_owner(page) != vcpu->domain) {
                        // This page might be a page granted by another
                        // domain.
-                       panic_domain(regs,
-                                    "domain tries to execute foreign domain "
-                                    "page which might be mapped by grant "
-                                    "table.\n");
+                       panic_domain(regs, "domain tries to execute foreign "
+                                    "domain page which might be mapped by "
+                                    "grant table.\n");
                }
                goto again;
        }
-       *bundle = *((IA64_BUNDLE*)vaddr);
+       *bundle = *((IA64_BUNDLE *) vaddr);
        put_page(page);
        return 1;
 }
 
-IA64FAULT vcpu_translate(VCPU *vcpu, UINT64 address, BOOLEAN is_data, UINT64 *pteval, UINT64 *itir, UINT64 *iha)
+IA64FAULT vcpu_translate(VCPU * vcpu, u64 address, BOOLEAN is_data,
+                        u64 * pteval, u64 * itir, u64 * iha)
 {
        unsigned long region = address >> 61;
        unsigned long pta, rid, rr;
        union pte_flags pte;
        TR_ENTRY *trp;
 
-       if (PSCB(vcpu,metaphysical_mode) && !(!is_data && region)) {
+       if (PSCB(vcpu, metaphysical_mode) && !(!is_data && region)) {
                // dom0 may generate an uncacheable physical address (msb=1)
                if (region && ((region != 4) || (vcpu->domain != dom0))) {
 // FIXME: This seems to happen even though it shouldn't.  Need to track
 // this down, but since it has been apparently harmless, just flag it for now
-//                     panic_domain(vcpu_regs(vcpu),
+//                      panic_domain(vcpu_regs(vcpu),
 
                        /*
                         * Guest may execute itc.d and rfi with psr.dt=0
@@ -1501,29 +1619,29 @@ IA64FAULT vcpu_translate(VCPU *vcpu, UIN
                         * At this time PSCB(vcpu,metaphysical_mode)=1,
                         * region=5,VMM need to handle this tlb miss as if
                         * PSCB(vcpu,metaphysical_mode)=0
-                        */           
-                       printk("vcpu_translate: bad physical address: 0x%lx at 
%lx\n",
-                              address, vcpu_regs (vcpu)->cr_iip);
+                        */
+                       printk("vcpu_translate: bad physical address: 0x%lx "
+                              "at %lx\n", address, vcpu_regs(vcpu)->cr_iip);
 
                } else {
-                       *pteval = (address & _PAGE_PPN_MASK) | __DIRTY_BITS |
-                                 _PAGE_PL_2 | _PAGE_AR_RWX;
+                       *pteval = (address & _PAGE_PPN_MASK) |
+                               __DIRTY_BITS | _PAGE_PL_2 | _PAGE_AR_RWX;
                        *itir = PAGE_SHIFT << 2;
                        perfc_incrc(phys_translate);
                        return IA64_NO_FAULT;
                }
-       }
-       else if (!region && warn_region0_address) {
+       } else if (!region && warn_region0_address) {
                REGS *regs = vcpu_regs(vcpu);
-               unsigned long viip = PSCB(vcpu,iip);
-               unsigned long vipsr = PSCB(vcpu,ipsr);
+               unsigned long viip = PSCB(vcpu, iip);
+               unsigned long vipsr = PSCB(vcpu, ipsr);
                unsigned long iip = regs->cr_iip;
                unsigned long ipsr = regs->cr_ipsr;
-               printk("vcpu_translate: bad address 0x%lx, viip=0x%lx, 
vipsr=0x%lx, iip=0x%lx, ipsr=0x%lx continuing\n",
-                       address, viip, vipsr, iip, ipsr);
-       }
-
-       rr = PSCB(vcpu,rrs)[region];
+               printk("vcpu_translate: bad address 0x%lx, viip=0x%lx, "
+                      "vipsr=0x%lx, iip=0x%lx, ipsr=0x%lx continuing\n",
+                      address, viip, vipsr, iip, ipsr);
+       }
+
+       rr = PSCB(vcpu, rrs)[region];
        rid = rr & RR_RID_MASK;
        if (is_data) {
                trp = vcpu_tr_lookup(vcpu, address, rid, 1);
@@ -1535,7 +1653,7 @@ IA64FAULT vcpu_translate(VCPU *vcpu, UIN
                }
        }
        // FIXME?: check itr's for data accesses too, else bad things happen?
-       /* else */ {
+       /* else */  {
                trp = vcpu_tr_lookup(vcpu, address, rid, 0);
                if (trp != NULL) {
                        *pteval = trp->pte.val;
@@ -1549,8 +1667,8 @@ IA64FAULT vcpu_translate(VCPU *vcpu, UIN
        // FIXME?: check dtlb for inst accesses too, else bad things happen?
        trp = &vcpu->arch.dtlb;
        pte = trp->pte;
-       if (/* is_data && */ pte.p
-           && vcpu_match_tr_entry_no_p(trp,address,rid)) {
+       if ( /* is_data && */ pte.p
+           && vcpu_match_tr_entry_no_p(trp, address, rid)) {
                *pteval = pte.val;
                *itir = trp->itir;
                perfc_incrc(dtlb_translate);
@@ -1558,10 +1676,10 @@ IA64FAULT vcpu_translate(VCPU *vcpu, UIN
        }
 
        /* check guest VHPT */
-       pta = PSCB(vcpu,pta);
+       pta = PSCB(vcpu, pta);
        if (pta & IA64_PTA_VF) { /* long format VHPT - not implemented */
-               panic_domain(vcpu_regs(vcpu),"can't do long format VHPT\n");
-               //return (is_data ? IA64_DATA_TLB_VECTOR:IA64_INST_TLB_VECTOR);
+               panic_domain(vcpu_regs(vcpu), "can't do long format VHPT\n");
+               //return is_data ? IA64_DATA_TLB_VECTOR:IA64_INST_TLB_VECTOR;
        }
 
        *itir = rr & (RR_RID_MASK | RR_PS_MASK);
@@ -1569,24 +1687,25 @@ IA64FAULT vcpu_translate(VCPU *vcpu, UIN
        // xenlinux depends on it so should document it as part of PV interface
        vcpu_thash(vcpu, address, iha);
        if (!(rr & RR_VE_MASK) || !(pta & IA64_PTA_VE))
-               return (is_data ? IA64_ALT_DATA_TLB_VECTOR : IA64_ALT_INST_TLB_VECTOR);
+               return is_data ? IA64_ALT_DATA_TLB_VECTOR :
+                       IA64_ALT_INST_TLB_VECTOR;
 
        /* avoid recursively walking (short format) VHPT */
        if (((address ^ pta) & ((itir_mask(pta) << 3) >> 3)) == 0)
-               return (is_data ? IA64_DATA_TLB_VECTOR : IA64_INST_TLB_VECTOR);
-
-       if (!__access_ok (*iha)
+               return is_data ? IA64_DATA_TLB_VECTOR : IA64_INST_TLB_VECTOR;
+
+       if (!__access_ok(*iha)
            || __copy_from_user(&pte, (void *)(*iha), sizeof(pte)) != 0)
                // virtual VHPT walker "missed" in TLB
                return IA64_VHPT_FAULT;
 
        /*
-       * Optimisation: this VHPT walker aborts on not-present pages
-       * instead of inserting a not-present translation, this allows
-       * vectoring directly to the miss handler.
-       */
+        * Optimisation: this VHPT walker aborts on not-present pages
+        * instead of inserting a not-present translation, this allows
+        * vectoring directly to the miss handler.
+        */
        if (!pte.p)
-               return (is_data ? IA64_DATA_TLB_VECTOR : IA64_INST_TLB_VECTOR);
+               return is_data ? IA64_DATA_TLB_VECTOR : IA64_INST_TLB_VECTOR;
 
        /* found mapping in guest VHPT! */
        *itir = rr & RR_PS_MASK;
@@ -1595,25 +1714,24 @@ IA64FAULT vcpu_translate(VCPU *vcpu, UIN
        return IA64_NO_FAULT;
 }
 
-IA64FAULT vcpu_tpa(VCPU *vcpu, UINT64 vadr, UINT64 *padr)
-{
-       UINT64 pteval, itir, mask, iha;
+IA64FAULT vcpu_tpa(VCPU * vcpu, u64 vadr, u64 * padr)
+{
+       u64 pteval, itir, mask, iha;
        IA64FAULT fault;
 
        fault = vcpu_translate(vcpu, vadr, TRUE, &pteval, &itir, &iha);
-       if (fault == IA64_NO_FAULT || fault == IA64_USE_TLB)
-       {
+       if (fault == IA64_NO_FAULT || fault == IA64_USE_TLB) {
                mask = itir_mask(itir);
                *padr = (pteval & _PAGE_PPN_MASK & mask) | (vadr & ~mask);
-               return (IA64_NO_FAULT);
-       }
-       return vcpu_force_data_miss(vcpu,vadr);
-}
-
-IA64FAULT vcpu_tak(VCPU *vcpu, UINT64 vadr, UINT64 *key)
+               return IA64_NO_FAULT;
+       }
+       return vcpu_force_data_miss(vcpu, vadr);
+}
+
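/* [Editor's sketch, not part of the changeset]  A worked instance of
 * the splice in vcpu_tpa() above, assuming itir_mask() returns the
 * complement of the page-offset mask.  For a 16KB mapping (ps = 14):
 *	mask = ~((1UL << 14) - 1);
 *	padr = (pteval & _PAGE_PPN_MASK & mask) | (vadr & ~mask);
 * i.e. the PTE supplies the page frame and the virtual address the
 * low 14 offset bits. */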
+IA64FAULT vcpu_tak(VCPU * vcpu, u64 vadr, u64 * key)
 {
        printf("vcpu_tak: tak instruction unsupported\n");
-       return (IA64_ILLOP_FAULT);
+       return IA64_ILLOP_FAULT;
        // HACK ALERT: tak does a thash for now
        //return vcpu_thash(vcpu,vadr,key);
 }
@@ -1622,84 +1740,84 @@ IA64FAULT vcpu_tak(VCPU *vcpu, UINT64 va
  VCPU debug breakpoint register access routines
 **************************************************************************/
 
-IA64FAULT vcpu_set_dbr(VCPU *vcpu, UINT64 reg, UINT64 val)
+IA64FAULT vcpu_set_dbr(VCPU * vcpu, u64 reg, u64 val)
 {
        // TODO: unimplemented DBRs return a reserved register fault
        // TODO: Should set Logical CPU state, not just physical
-       ia64_set_dbr(reg,val);
-       return (IA64_NO_FAULT);
-}
-
-IA64FAULT vcpu_set_ibr(VCPU *vcpu, UINT64 reg, UINT64 val)
+       ia64_set_dbr(reg, val);
+       return IA64_NO_FAULT;
+}
+
+IA64FAULT vcpu_set_ibr(VCPU * vcpu, u64 reg, u64 val)
 {
        // TODO: unimplemented IBRs return a reserved register fault
        // TODO: Should set Logical CPU state, not just physical
-       ia64_set_ibr(reg,val);
-       return (IA64_NO_FAULT);
-}
-
-IA64FAULT vcpu_get_dbr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
+       ia64_set_ibr(reg, val);
+       return IA64_NO_FAULT;
+}
+
+IA64FAULT vcpu_get_dbr(VCPU * vcpu, u64 reg, u64 * pval)
 {
        // TODO: unimplemented DBRs return a reserved register fault
-       UINT64 val = ia64_get_dbr(reg);
+       u64 val = ia64_get_dbr(reg);
        *pval = val;
-       return (IA64_NO_FAULT);
-}
-
-IA64FAULT vcpu_get_ibr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
+       return IA64_NO_FAULT;
+}
+
+IA64FAULT vcpu_get_ibr(VCPU * vcpu, u64 reg, u64 * pval)
 {
        // TODO: unimplemented IBRs return a reserved register fault
-       UINT64 val = ia64_get_ibr(reg);
+       u64 val = ia64_get_ibr(reg);
        *pval = val;
-       return (IA64_NO_FAULT);
+       return IA64_NO_FAULT;
 }
 
 /**************************************************************************
  VCPU performance monitor register access routines
 **************************************************************************/
 
-IA64FAULT vcpu_set_pmc(VCPU *vcpu, UINT64 reg, UINT64 val)
+IA64FAULT vcpu_set_pmc(VCPU * vcpu, u64 reg, u64 val)
 {
        // TODO: Should set Logical CPU state, not just physical
        // NOTE: Writes to unimplemented PMC registers are discarded
 #ifdef DEBUG_PFMON
-printf("vcpu_set_pmc(%x,%lx)\n",reg,val);
+       printf("vcpu_set_pmc(%x,%lx)\n", reg, val);
 #endif
-       ia64_set_pmc(reg,val);
-       return (IA64_NO_FAULT);
-}
-
-IA64FAULT vcpu_set_pmd(VCPU *vcpu, UINT64 reg, UINT64 val)
+       ia64_set_pmc(reg, val);
+       return IA64_NO_FAULT;
+}
+
+IA64FAULT vcpu_set_pmd(VCPU * vcpu, u64 reg, u64 val)
 {
        // TODO: Should set Logical CPU state, not just physical
        // NOTE: Writes to unimplemented PMD registers are discarded
 #ifdef DEBUG_PFMON
-printf("vcpu_set_pmd(%x,%lx)\n",reg,val);
+       printf("vcpu_set_pmd(%x,%lx)\n", reg, val);
 #endif
-       ia64_set_pmd(reg,val);
-       return (IA64_NO_FAULT);
-}
-
-IA64FAULT vcpu_get_pmc(VCPU *vcpu, UINT64 reg, UINT64 *pval)
+       ia64_set_pmd(reg, val);
+       return IA64_NO_FAULT;
+}
+
+IA64FAULT vcpu_get_pmc(VCPU * vcpu, u64 reg, u64 * pval)
 {
        // NOTE: Reads from unimplemented PMC registers return zero
-       UINT64 val = (UINT64)ia64_get_pmc(reg);
+       u64 val = (u64) ia64_get_pmc(reg);
 #ifdef DEBUG_PFMON
-printf("%lx=vcpu_get_pmc(%x)\n",val,reg);
+       printf("%lx=vcpu_get_pmc(%x)\n", val, reg);
 #endif
        *pval = val;
-       return (IA64_NO_FAULT);
-}
-
-IA64FAULT vcpu_get_pmd(VCPU *vcpu, UINT64 reg, UINT64 *pval)
+       return IA64_NO_FAULT;
+}
+
+IA64FAULT vcpu_get_pmd(VCPU * vcpu, u64 reg, u64 * pval)
 {
        // NOTE: Reads from unimplemented PMD registers return zero
-       UINT64 val = (UINT64)ia64_get_pmd(reg);
+       u64 val = (u64) ia64_get_pmd(reg);
 #ifdef DEBUG_PFMON
-printf("%lx=vcpu_get_pmd(%x)\n",val,reg);
+       printf("%lx=vcpu_get_pmd(%x)\n", val, reg);
 #endif
        *pval = val;
-       return (IA64_NO_FAULT);
+       return IA64_NO_FAULT;
 }
 
 /**************************************************************************
@@ -1718,167 +1836,183 @@ do{     \
         "r"(runat),"i"(IA64_PT_REGS_R16_SLOT):"memory");    \
 }while(0)
 
-IA64FAULT vcpu_bsw0(VCPU *vcpu)
+IA64FAULT vcpu_bsw0(VCPU * vcpu)
 {
        // TODO: Only allowed for current vcpu
        REGS *regs = vcpu_regs(vcpu);
        unsigned long *r = &regs->r16;
-       unsigned long *b0 = &PSCB(vcpu,bank0_regs[0]);
-       unsigned long *b1 = &PSCB(vcpu,bank1_regs[0]);
+       unsigned long *b0 = &PSCB(vcpu, bank0_regs[0]);
+       unsigned long *b1 = &PSCB(vcpu, bank1_regs[0]);
        unsigned long *runat = &regs->eml_unat;
-       unsigned long *b0unat = &PSCB(vcpu,vbnat);
-       unsigned long *b1unat = &PSCB(vcpu,vnat);
+       unsigned long *b0unat = &PSCB(vcpu, vbnat);
+       unsigned long *b1unat = &PSCB(vcpu, vnat);
 
        unsigned long i;
 
-    if(VMX_DOMAIN(vcpu)){
-        if(VCPU(vcpu,vpsr)&IA64_PSR_BN){
-            for (i = 0; i < 16; i++) { *b1++ = *r; *r++ = *b0++; }
-            vcpu_bsw0_unat(i,b0unat,b1unat,runat,IA64_PT_REGS_R16_SLOT);
-            VCPU(vcpu,vpsr) &= ~IA64_PSR_BN;
-        }
-    }else{
-        if (PSCB(vcpu,banknum)) {
-            for (i = 0; i < 16; i++) { *b1++ = *r; *r++ = *b0++; }
-            vcpu_bsw0_unat(i,b0unat,b1unat,runat,IA64_PT_REGS_R16_SLOT);
-            PSCB(vcpu,banknum) = 0;
-        }
-    }
-       return (IA64_NO_FAULT);
-}
-
-#define vcpu_bsw1_unat(i,b0unat,b1unat,runat,IA64_PT_REGS_R16_SLOT)     \
-do{             \
-    __asm__ __volatile__ (      \
-        ";;extr.u %0 = %3,%6,16;;\n"                \
-        "dep %1 = %0, %1, 16, 16;;\n"               \
-        "st8 [%4] = %1\n"                           \
-        "extr.u %0 = %2, 0, 16;;\n"                 \
-        "dep %3 = %0, %3, %6, 16;;\n"               \
-        "st8 [%5] = %3\n"                           \
-        ::"r"(i),"r"(*b0unat),"r"(*b1unat),"r"(*runat),"r"(b0unat), \
-        "r"(runat),"i"(IA64_PT_REGS_R16_SLOT):"memory");            \
-}while(0)
-
-IA64FAULT vcpu_bsw1(VCPU *vcpu)
+       if (VMX_DOMAIN(vcpu)) {
+               if (VCPU(vcpu, vpsr) & IA64_PSR_BN) {
+                       for (i = 0; i < 16; i++) {
+                               *b1++ = *r;
+                               *r++ = *b0++;
+                       }
+                       vcpu_bsw0_unat(i, b0unat, b1unat, runat,
+                                      IA64_PT_REGS_R16_SLOT);
+                       VCPU(vcpu, vpsr) &= ~IA64_PSR_BN;
+               }
+       } else {
+               if (PSCB(vcpu, banknum)) {
+                       for (i = 0; i < 16; i++) {
+                               *b1++ = *r;
+                               *r++ = *b0++;
+                       }
+                       vcpu_bsw0_unat(i, b0unat, b1unat, runat,
+                                      IA64_PT_REGS_R16_SLOT);
+                       PSCB(vcpu, banknum) = 0;
+               }
+       }
+       return IA64_NO_FAULT;
+}
+
+#define vcpu_bsw1_unat(i, b0unat, b1unat, runat, IA64_PT_REGS_R16_SLOT)        \
+do {                                                                   \
+       __asm__ __volatile__ (";;extr.u %0 = %3,%6,16;;\n"              \
+                             "dep %1 = %0, %1, 16, 16;;\n"             \
+                             "st8 [%4] = %1\n"                         \
+                             "extr.u %0 = %2, 0, 16;;\n"               \
+                             "dep %3 = %0, %3, %6, 16;;\n"             \
+                             "st8 [%5] = %3\n"                         \
+                             ::"r"(i), "r"(*b0unat), "r"(*b1unat),     \
+                             "r"(*runat), "r"(b0unat), "r"(runat),     \
+                             "i"(IA64_PT_REGS_R16_SLOT): "memory");    \
+} while(0)
+
+IA64FAULT vcpu_bsw1(VCPU * vcpu)
 {
        // TODO: Only allowed for current vcpu
        REGS *regs = vcpu_regs(vcpu);
        unsigned long *r = &regs->r16;
-       unsigned long *b0 = &PSCB(vcpu,bank0_regs[0]);
-       unsigned long *b1 = &PSCB(vcpu,bank1_regs[0]);
+       unsigned long *b0 = &PSCB(vcpu, bank0_regs[0]);
+       unsigned long *b1 = &PSCB(vcpu, bank1_regs[0]);
        unsigned long *runat = &regs->eml_unat;
-       unsigned long *b0unat = &PSCB(vcpu,vbnat);
-       unsigned long *b1unat = &PSCB(vcpu,vnat);
+       unsigned long *b0unat = &PSCB(vcpu, vbnat);
+       unsigned long *b1unat = &PSCB(vcpu, vnat);
 
        unsigned long i;
 
-    if(VMX_DOMAIN(vcpu)){
-        if(!(VCPU(vcpu,vpsr)&IA64_PSR_BN)){
-            for (i = 0; i < 16; i++) { *b0++ = *r; *r++ = *b1++; }
-            vcpu_bsw1_unat(i,b0unat,b1unat,runat,IA64_PT_REGS_R16_SLOT);
-            VCPU(vcpu,vpsr) |= IA64_PSR_BN;
-        }
-    }else{
-        if (!PSCB(vcpu,banknum)) {
-            for (i = 0; i < 16; i++) { *b0++ = *r; *r++ = *b1++; }
-            vcpu_bsw1_unat(i,b0unat,b1unat,runat,IA64_PT_REGS_R16_SLOT);
-            PSCB(vcpu,banknum) = 1;
-        }
-    }
-       return (IA64_NO_FAULT);
+       if (VMX_DOMAIN(vcpu)) {
+               if (!(VCPU(vcpu, vpsr) & IA64_PSR_BN)) {
+                       for (i = 0; i < 16; i++) {
+                               *b0++ = *r;
+                               *r++ = *b1++;
+                       }
+                       vcpu_bsw1_unat(i, b0unat, b1unat, runat,
+                                      IA64_PT_REGS_R16_SLOT);
+                       VCPU(vcpu, vpsr) |= IA64_PSR_BN;
+               }
+       } else {
+               if (!PSCB(vcpu, banknum)) {
+                       for (i = 0; i < 16; i++) {
+                               *b0++ = *r;
+                               *r++ = *b1++;
+                       }
+                       vcpu_bsw1_unat(i, b0unat, b1unat, runat,
+                                      IA64_PT_REGS_R16_SLOT);
+                       PSCB(vcpu, banknum) = 1;
+               }
+       }
+       return IA64_NO_FAULT;
 }
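/* [Editor's sketch, not part of the changeset]  In plain C the copy
 * loops in vcpu_bsw0/vcpu_bsw1 above amount to (bsw1 shown; bsw0 is the
 * mirror image, and the *_unat macros move the matching NaT bits
 * alongside):
 *	for (i = 0; i < 16; i++) {
 *		b0[i] = r[i];	// park outgoing bank 0
 *		r[i]  = b1[i];	// expose bank 1 via pt_regs r16-r31
 *	}
 */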
 
 /**************************************************************************
  VCPU cpuid access routines
 **************************************************************************/
 
-
-IA64FAULT vcpu_get_cpuid(VCPU *vcpu, UINT64 reg, UINT64 *pval)
+IA64FAULT vcpu_get_cpuid(VCPU * vcpu, u64 reg, u64 * pval)
 {
        // FIXME: This could get called as a result of a rsvd-reg fault
        // if reg > 3
-       switch(reg) {
-           case 0:
-               memcpy(pval,"Xen/ia64",8);
+       switch (reg) {
+       case 0:
+               memcpy(pval, "Xen/ia64", 8);
                break;
-           case 1:
+       case 1:
                *pval = 0;
                break;
-           case 2:
+       case 2:
                *pval = 0;
                break;
-           case 3:
+       case 3:
                *pval = ia64_get_cpuid(3);
                break;
-           case 4:
+       case 4:
                *pval = ia64_get_cpuid(4);
                break;
-           default:
+       default:
                if (reg > (ia64_get_cpuid(3) & 0xff))
                        return IA64_RSVDREG_FAULT;
                *pval = ia64_get_cpuid(reg);
                break;
        }
-       return (IA64_NO_FAULT);
+       return IA64_NO_FAULT;
 }
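/* [Editor's note, illustration only]  A PV guest reading cpuid[0]
 * through this path gets the vendor string packed into one register:
 *	u64 v;
 *	vcpu_get_cpuid(vcpu, 0, &v);	// v holds the bytes "Xen/ia64"
 */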
 
 /**************************************************************************
  VCPU region register access routines
 **************************************************************************/
 
-unsigned long vcpu_get_rr_ve(VCPU *vcpu,UINT64 vadr)
+unsigned long vcpu_get_rr_ve(VCPU * vcpu, u64 vadr)
 {
        ia64_rr rr;
 
-       rr.rrval = PSCB(vcpu,rrs)[vadr>>61];
-       return(rr.ve);
-}
-
-IA64FAULT vcpu_set_rr(VCPU *vcpu, UINT64 reg, UINT64 val)
-{
-       PSCB(vcpu,rrs)[reg>>61] = val;
+       rr.rrval = PSCB(vcpu, rrs)[vadr >> 61];
+       return rr.ve;
+}
+
+IA64FAULT vcpu_set_rr(VCPU * vcpu, u64 reg, u64 val)
+{
+       PSCB(vcpu, rrs)[reg >> 61] = val;
        // warning: set_one_rr() does it "live"
-       set_one_rr(reg,val);
-       return (IA64_NO_FAULT);
-}
-
-IA64FAULT vcpu_get_rr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
-{
-       if(VMX_DOMAIN(vcpu)){
-               *pval = VMX(vcpu,vrr[reg>>61]);
-       }else{
-               *pval = PSCB(vcpu,rrs)[reg>>61];
-       }
-       return (IA64_NO_FAULT);
+       set_one_rr(reg, val);
+       return IA64_NO_FAULT;
+}
+
+IA64FAULT vcpu_get_rr(VCPU * vcpu, u64 reg, u64 * pval)
+{
+       if (VMX_DOMAIN(vcpu))
+               *pval = VMX(vcpu, vrr[reg >> 61]);
+       else
+               *pval = PSCB(vcpu, rrs)[reg >> 61];
+
+       return IA64_NO_FAULT;
 }
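/* [Editor's note, illustration only]  The region registers are indexed
 * by the top three address bits, so a typical caller does:
 *	u64 rr, rid;
 *	vcpu_get_rr(vcpu, vadr, &rr);
 *	rid = rr & RR_RID_MASK;		// key used for TR/TLB matching
 */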
 
 /**************************************************************************
  VCPU protection key register access routines
 **************************************************************************/
 
-IA64FAULT vcpu_get_pkr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
+IA64FAULT vcpu_get_pkr(VCPU * vcpu, u64 reg, u64 * pval)
 {
 #ifndef PKR_USE_FIXED
        printk("vcpu_get_pkr: called, not implemented yet\n");
        return IA64_ILLOP_FAULT;
 #else
-       UINT64 val = (UINT64)ia64_get_pkr(reg);
+       u64 val = (u64) ia64_get_pkr(reg);
        *pval = val;
-       return (IA64_NO_FAULT);
+       return IA64_NO_FAULT;
 #endif
 }
 
-IA64FAULT vcpu_set_pkr(VCPU *vcpu, UINT64 reg, UINT64 val)
+IA64FAULT vcpu_set_pkr(VCPU * vcpu, u64 reg, u64 val)
 {
 #ifndef PKR_USE_FIXED
        printk("vcpu_set_pkr: called, not implemented yet\n");
        return IA64_ILLOP_FAULT;
 #else
-//     if (reg >= NPKRS) return (IA64_ILLOP_FAULT);
+//      if (reg >= NPKRS)
+//             return IA64_ILLOP_FAULT;
        vcpu->pkrs[reg] = val;
-       ia64_set_pkr(reg,val);
-       return (IA64_NO_FAULT);
+       ia64_set_pkr(reg, val);
+       return IA64_NO_FAULT;
 #endif
 }
 
@@ -1887,21 +2021,22 @@ IA64FAULT vcpu_set_pkr(VCPU *vcpu, UINT6
 **************************************************************************/
 
 static void
-vcpu_set_tr_entry_rid(TR_ENTRY *trp, UINT64 pte,
-                      UINT64 itir, UINT64 ifa, UINT64 rid)
-{
-       UINT64 ps;
+vcpu_set_tr_entry_rid(TR_ENTRY * trp, u64 pte,
+                      u64 itir, u64 ifa, u64 rid)
+{
+       u64 ps;
        union pte_flags new_pte;
 
        trp->itir = itir;
        trp->rid = rid;
        ps = trp->ps;
        new_pte.val = pte;
-       if (new_pte.pl < 2) new_pte.pl = 2;
+       if (new_pte.pl < 2)
+               new_pte.pl = 2;
        trp->vadr = ifa & ~0xfff;
-       if (ps > 12) { // "ignore" relevant low-order bits
-               new_pte.ppn &= ~((1UL<<(ps-12))-1);
-               trp->vadr &= ~((1UL<<ps)-1);
+       if (ps > 12) {          // "ignore" relevant low-order bits
+               new_pte.ppn &= ~((1UL << (ps - 12)) - 1);
+               trp->vadr &= ~((1UL << ps) - 1);
        }
 
        /* Atomic write.  */
@@ -1909,25 +2044,26 @@ vcpu_set_tr_entry_rid(TR_ENTRY *trp, UIN
 }
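/* [Editor's note, illustration only]  A worked instance of the ps > 12
 * masking above for a hypothetical 64KB entry (ps = 16): ppn is kept in
 * 4KB units, so its low 4 bits are cleared and vadr is aligned down to
 * the mapping size:
 *	new_pte.ppn &= ~((1UL << (16 - 12)) - 1);	// 16-frame multiple
 *	trp->vadr   &= ~((1UL << 16) - 1);		// 64KB-aligned
 */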
 
 static inline void
-vcpu_set_tr_entry(TR_ENTRY *trp, UINT64 pte, UINT64 itir, UINT64 ifa)
+vcpu_set_tr_entry(TR_ENTRY * trp, u64 pte, u64 itir, u64 ifa)
 {
        vcpu_set_tr_entry_rid(trp, pte, itir, ifa,
-                             VCPU(current, rrs[ifa>>61]) & RR_RID_MASK);
-}
-
-IA64FAULT vcpu_itr_d(VCPU *vcpu, UINT64 slot, UINT64 pte,
-                     UINT64 itir, UINT64 ifa)
+                             VCPU(current, rrs[ifa >> 61]) & RR_RID_MASK);
+}
+
+IA64FAULT vcpu_itr_d(VCPU * vcpu, u64 slot, u64 pte,
+                     u64 itir, u64 ifa)
 {
        TR_ENTRY *trp;
 
-       if (slot >= NDTRS) return IA64_RSVDREG_FAULT;
+       if (slot >= NDTRS)
+               return IA64_RSVDREG_FAULT;
 
        vcpu_purge_tr_entry(&PSCBX(vcpu, dtlb));
 
-       trp = &PSCBX(vcpu,dtrs[slot]);
+       trp = &PSCBX(vcpu, dtrs[slot]);
 //printf("***** itr.d: setting slot %d: ifa=%p\n",slot,ifa);
-       vcpu_set_tr_entry(trp,pte,itir,ifa);
-       vcpu_quick_region_set(PSCBX(vcpu,dtr_regions),ifa);
+       vcpu_set_tr_entry(trp, pte, itir, ifa);
+       vcpu_quick_region_set(PSCBX(vcpu, dtr_regions), ifa);
 
        /*
         * FIXME According to spec, vhpt should be purged, but this
@@ -1941,19 +2077,20 @@ IA64FAULT vcpu_itr_d(VCPU *vcpu, UINT64 
        return IA64_NO_FAULT;
 }
 
-IA64FAULT vcpu_itr_i(VCPU *vcpu, UINT64 slot, UINT64 pte,
-                     UINT64 itir, UINT64 ifa)
+IA64FAULT vcpu_itr_i(VCPU * vcpu, u64 slot, u64 pte,
+                     u64 itir, u64 ifa)
 {
        TR_ENTRY *trp;
 
-       if (slot >= NITRS) return IA64_RSVDREG_FAULT;
+       if (slot >= NITRS)
+               return IA64_RSVDREG_FAULT;
 
        vcpu_purge_tr_entry(&PSCBX(vcpu, itlb));
 
-       trp = &PSCBX(vcpu,itrs[slot]);
+       trp = &PSCBX(vcpu, itrs[slot]);
 //printf("***** itr.i: setting slot %d: ifa=%p\n",slot,ifa);
-       vcpu_set_tr_entry(trp,pte,itir,ifa);
-       vcpu_quick_region_set(PSCBX(vcpu,itr_regions),ifa);
+       vcpu_set_tr_entry(trp, pte, itir, ifa);
+       vcpu_quick_region_set(PSCBX(vcpu, itr_regions), ifa);
 
        /*
         * FIXME According to spec, vhpt should be purged, but this
@@ -1967,13 +2104,13 @@ IA64FAULT vcpu_itr_i(VCPU *vcpu, UINT64 
        return IA64_NO_FAULT;
 }
 
-IA64FAULT vcpu_set_itr(VCPU *vcpu, u64 slot, u64 pte,
+IA64FAULT vcpu_set_itr(VCPU * vcpu, u64 slot, u64 pte,
                        u64 itir, u64 ifa, u64 rid)
 {
        TR_ENTRY *trp;
 
        if (slot >= NITRS)
-               return IA64_RSVDREG_FAULT;
+               return IA64_RSVDREG_FAULT;
        trp = &PSCBX(vcpu, itrs[slot]);
        vcpu_set_tr_entry_rid(trp, pte, itir, ifa, rid);
 
@@ -1986,7 +2123,7 @@ IA64FAULT vcpu_set_itr(VCPU *vcpu, u64 s
        return IA64_NO_FAULT;
 }
 
-IA64FAULT vcpu_set_dtr(VCPU *vcpu, u64 slot, u64 pte,
+IA64FAULT vcpu_set_dtr(VCPU * vcpu, u64 slot, u64 pte,
                        u64 itir, u64 ifa, u64 rid)
 {
        TR_ENTRY *trp;
@@ -2010,65 +2147,71 @@ IA64FAULT vcpu_set_dtr(VCPU *vcpu, u64 s
 **************************************************************************/
 
 void
-vcpu_itc_no_srlz(VCPU *vcpu, UINT64 IorD, UINT64 vaddr, UINT64 pte,
-                 UINT64 mp_pte, UINT64 logps, struct p2m_entry* entry)
+vcpu_itc_no_srlz(VCPU * vcpu, u64 IorD, u64 vaddr, u64 pte,
+                 u64 mp_pte, u64 logps, struct p2m_entry *entry)
 {
        unsigned long psr;
-       unsigned long ps = (vcpu->domain==dom0) ? logps : PAGE_SHIFT;
-
-       check_xen_space_overlap ("itc", vaddr, 1UL << logps);
+       unsigned long ps = (vcpu->domain == dom0) ? logps : PAGE_SHIFT;
+
+       check_xen_space_overlap("itc", vaddr, 1UL << logps);
 
        // FIXME, must be inlined or potential for nested fault here!
-       if ((vcpu->domain==dom0) && (logps < PAGE_SHIFT))
-               panic_domain (NULL, "vcpu_itc_no_srlz: domain trying to use "
-                             "smaller page size!\n");
+       if ((vcpu->domain == dom0) && (logps < PAGE_SHIFT))
+               panic_domain(NULL, "vcpu_itc_no_srlz: domain trying to use "
+                            "smaller page size!\n");
 
        BUG_ON(logps > PAGE_SHIFT);
        vcpu_tlb_track_insert_or_dirty(vcpu, vaddr, entry);
        psr = ia64_clear_ic();
-       ia64_itc(IorD,vaddr,pte,ps); // FIXME: look for bigger mappings
+       ia64_itc(IorD, vaddr, pte, ps); // FIXME: look for bigger mappings
        ia64_set_psr(psr);
        // ia64_srlz_i(); // no srls req'd, will rfi later
 #ifdef VHPT_GLOBAL
-       if (vcpu->domain==dom0 && ((vaddr >> 61) == 7)) {
+       if (vcpu->domain == dom0 && ((vaddr >> 61) == 7)) {
                // FIXME: this is dangerous... vhpt_flush_address ensures these
                // addresses never get flushed.  More work needed if this
                // ever happens.
 //printf("vhpt_insert(%p,%p,%p)\n",vaddr,pte,1L<<logps);
-               if (logps > PAGE_SHIFT) vhpt_multiple_insert(vaddr,pte,logps);
-               else vhpt_insert(vaddr,pte,logps<<2);
+               if (logps > PAGE_SHIFT)
+                       vhpt_multiple_insert(vaddr, pte, logps);
+               else
+                       vhpt_insert(vaddr, pte, logps << 2);
        }
        // even if domain pagesize is larger than PAGE_SIZE, just put
        // PAGE_SIZE mapping in the vhpt for now, else purging is complicated
-       else vhpt_insert(vaddr,pte,PAGE_SHIFT<<2);
+       else
+               vhpt_insert(vaddr, pte, PAGE_SHIFT << 2);
 #endif
-       if (IorD & 0x4) /* don't place in 1-entry TLB */
+       if (IorD & 0x4)         /* don't place in 1-entry TLB */
                return;
        if (IorD & 0x1) {
-               vcpu_set_tr_entry(&PSCBX(vcpu,itlb),mp_pte,ps<<2,vaddr);
+               vcpu_set_tr_entry(&PSCBX(vcpu, itlb), mp_pte, ps << 2, vaddr);
        }
        if (IorD & 0x2) {
-               vcpu_set_tr_entry(&PSCBX(vcpu,dtlb),mp_pte,ps<<2,vaddr);
-       }
-}
-
-IA64FAULT vcpu_itc_d(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa)
+               vcpu_set_tr_entry(&PSCBX(vcpu, dtlb), mp_pte, ps << 2, vaddr);
+       }
+}
+
+IA64FAULT vcpu_itc_d(VCPU * vcpu, u64 pte, u64 itir, u64 ifa)
 {
        unsigned long pteval, logps = itir_ps(itir);
-       BOOLEAN swap_rr0 = (!(ifa>>61) && PSCB(vcpu,metaphysical_mode));
+       BOOLEAN swap_rr0 = (!(ifa >> 61) && PSCB(vcpu, metaphysical_mode));
        struct p2m_entry entry;
 
        if (logps < PAGE_SHIFT)
-               panic_domain (NULL, "vcpu_itc_d: domain trying to use "
-                             "smaller page size!\n");
-
-again:
+               panic_domain(NULL, "vcpu_itc_d: domain trying to use "
+                            "smaller page size!\n");
+
+ again:
        //itir = (itir & ~0xfc) | (PAGE_SHIFT<<2); // ignore domain's pagesize
        pteval = translate_domain_pte(pte, ifa, itir, &logps, &entry);
-       if (!pteval) return IA64_ILLOP_FAULT;
-       if (swap_rr0) set_one_rr(0x0,PSCB(vcpu,rrs[0]));
+       if (!pteval)
+               return IA64_ILLOP_FAULT;
+       if (swap_rr0)
+               set_one_rr(0x0, PSCB(vcpu, rrs[0]));
        vcpu_itc_no_srlz(vcpu, 2, ifa, pteval, pte, logps, &entry);
-       if (swap_rr0) set_metaphysical_rr0();
+       if (swap_rr0)
+               set_metaphysical_rr0();
        if (p2m_entry_retry(&entry)) {
                vcpu_flush_tlb_vhpt_range(ifa, logps);
                goto again;
@@ -2076,22 +2219,25 @@ again:
        return IA64_NO_FAULT;
 }
 
-IA64FAULT vcpu_itc_i(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa)
+IA64FAULT vcpu_itc_i(VCPU * vcpu, u64 pte, u64 itir, u64 ifa)
 {
        unsigned long pteval, logps = itir_ps(itir);
-       BOOLEAN swap_rr0 = (!(ifa>>61) && PSCB(vcpu,metaphysical_mode));
+       BOOLEAN swap_rr0 = (!(ifa >> 61) && PSCB(vcpu, metaphysical_mode));
        struct p2m_entry entry;
 
        if (logps < PAGE_SHIFT)
-               panic_domain (NULL, "vcpu_itc_i: domain trying to use "
-                             "smaller page size!\n");
-again:
+               panic_domain(NULL, "vcpu_itc_i: domain trying to use "
+                            "smaller page size!\n");
+      again:
        //itir = (itir & ~0xfc) | (PAGE_SHIFT<<2); // ignore domain's pagesize
        pteval = translate_domain_pte(pte, ifa, itir, &logps, &entry);
-       if (!pteval) return IA64_ILLOP_FAULT;
-       if (swap_rr0) set_one_rr(0x0,PSCB(vcpu,rrs[0]));
+       if (!pteval)
+               return IA64_ILLOP_FAULT;
+       if (swap_rr0)
+               set_one_rr(0x0, PSCB(vcpu, rrs[0]));
        vcpu_itc_no_srlz(vcpu, 1, ifa, pteval, pte, logps, &entry);
-       if (swap_rr0) set_metaphysical_rr0();
+       if (swap_rr0)
+               set_metaphysical_rr0();
        if (p2m_entry_retry(&entry)) {
                vcpu_flush_tlb_vhpt_range(ifa, logps);
                goto again;
@@ -2099,18 +2245,18 @@ again:
        return IA64_NO_FAULT;
 }
 
-IA64FAULT vcpu_ptc_l(VCPU *vcpu, UINT64 vadr, UINT64 log_range)
+IA64FAULT vcpu_ptc_l(VCPU * vcpu, u64 vadr, u64 log_range)
 {
        BUG_ON(vcpu != current);
 
-       check_xen_space_overlap ("ptc_l", vadr, 1UL << log_range);
+       check_xen_space_overlap("ptc_l", vadr, 1UL << log_range);
 
        /* Purge TC  */
-       vcpu_purge_tr_entry(&PSCBX(vcpu,dtlb));
-       vcpu_purge_tr_entry(&PSCBX(vcpu,itlb));
-       
+       vcpu_purge_tr_entry(&PSCBX(vcpu, dtlb));
+       vcpu_purge_tr_entry(&PSCBX(vcpu, itlb));
+
        /* Purge all tlb and vhpt */
-       vcpu_flush_tlb_vhpt_range (vadr, log_range);
+       vcpu_flush_tlb_vhpt_range(vadr, log_range);
 
        return IA64_NO_FAULT;
 }
@@ -2121,13 +2267,13 @@ IA64FAULT vcpu_ptc_l(VCPU *vcpu, UINT64 
 // access rights fault, we have to translate the virtual address to a
 // physical address (possibly via a metaphysical address) and do the fc
 // on the physical address, which is guaranteed to flush the same cache line
-IA64FAULT vcpu_fc(VCPU *vcpu, UINT64 vadr)
+IA64FAULT vcpu_fc(VCPU * vcpu, u64 vadr)
 {
        // TODO: Only allowed for current vcpu
-       UINT64 mpaddr, paddr;
+       u64 mpaddr, paddr;
        IA64FAULT fault;
 
-again:
+      again:
        fault = vcpu_tpa(vcpu, vadr, &mpaddr);
        if (fault == IA64_NO_FAULT) {
                struct p2m_entry entry;
@@ -2139,7 +2285,7 @@ again:
        return fault;
 }
 
-IA64FAULT vcpu_ptc_e(VCPU *vcpu, UINT64 vadr)
+IA64FAULT vcpu_ptc_e(VCPU * vcpu, u64 vadr)
 {
        // Note that this only needs to be called once, i.e. the
        // architected loop to purge the entire TLB, should use
@@ -2150,27 +2296,27 @@ IA64FAULT vcpu_ptc_e(VCPU *vcpu, UINT64 
        return IA64_NO_FAULT;
 }
 
-IA64FAULT vcpu_ptc_g(VCPU *vcpu, UINT64 vadr, UINT64 addr_range)
+IA64FAULT vcpu_ptc_g(VCPU * vcpu, u64 vadr, u64 addr_range)
 {
        printk("vcpu_ptc_g: called, not implemented yet\n");
        return IA64_ILLOP_FAULT;
 }
 
-IA64FAULT vcpu_ptc_ga(VCPU *vcpu,UINT64 vadr,UINT64 addr_range)
+IA64FAULT vcpu_ptc_ga(VCPU * vcpu, u64 vadr, u64 addr_range)
 {
        // FIXME: validate not flushing Xen addresses
        // if (Xen address) return(IA64_ILLOP_FAULT);
        // FIXME: ??breaks if domain PAGE_SIZE < Xen PAGE_SIZE
 //printf("######## vcpu_ptc_ga(%p,%p) ##############\n",vadr,addr_range);
 
-       check_xen_space_overlap ("ptc_ga", vadr, addr_range);
-
-       domain_flush_vtlb_range (vcpu->domain, vadr, addr_range);
-
-       return IA64_NO_FAULT;
-}
-
-IA64FAULT vcpu_ptr_d(VCPU *vcpu,UINT64 vadr,UINT64 log_range)
+       check_xen_space_overlap("ptc_ga", vadr, addr_range);
+
+       domain_flush_vtlb_range(vcpu->domain, vadr, addr_range);
+
+       return IA64_NO_FAULT;
+}
+
+IA64FAULT vcpu_ptr_d(VCPU * vcpu, u64 vadr, u64 log_range)
 {
        unsigned long region = vadr >> 61;
        u64 addr_range = 1UL << log_range;
@@ -2179,29 +2325,30 @@ IA64FAULT vcpu_ptr_d(VCPU *vcpu,UINT64 v
        TR_ENTRY *trp;
 
        BUG_ON(vcpu != current);
-       check_xen_space_overlap ("ptr_d", vadr, 1UL << log_range);
-
-       rr = PSCB(vcpu,rrs)[region];
+       check_xen_space_overlap("ptr_d", vadr, 1UL << log_range);
+
+       rr = PSCB(vcpu, rrs)[region];
        rid = rr & RR_RID_MASK;
 
        /* Purge TC  */
-       vcpu_purge_tr_entry(&PSCBX(vcpu,dtlb));
+       vcpu_purge_tr_entry(&PSCBX(vcpu, dtlb));
 
        /* Purge tr and recompute dtr_regions.  */
        vcpu->arch.dtr_regions = 0;
        for (trp = vcpu->arch.dtrs, i = NDTRS; i; i--, trp++)
-               if (vcpu_match_tr_entry_range (trp,rid, vadr, vadr+addr_range))
+               if (vcpu_match_tr_entry_range
+                   (trp, rid, vadr, vadr + addr_range))
                        vcpu_purge_tr_entry(trp);
                else if (trp->pte.p)
                        vcpu_quick_region_set(vcpu->arch.dtr_regions,
                                              trp->vadr);
 
-       vcpu_flush_tlb_vhpt_range (vadr, log_range);
-
-       return IA64_NO_FAULT;
-}
-
-IA64FAULT vcpu_ptr_i(VCPU *vcpu,UINT64 vadr,UINT64 log_range)
+       vcpu_flush_tlb_vhpt_range(vadr, log_range);
+
+       return IA64_NO_FAULT;
+}
+
+IA64FAULT vcpu_ptr_i(VCPU * vcpu, u64 vadr, u64 log_range)
 {
        unsigned long region = vadr >> 61;
        u64 addr_range = 1UL << log_range;
@@ -2210,24 +2357,25 @@ IA64FAULT vcpu_ptr_i(VCPU *vcpu,UINT64 v
        TR_ENTRY *trp;
 
        BUG_ON(vcpu != current);
-       check_xen_space_overlap ("ptr_i", vadr, 1UL << log_range);
-
-       rr = PSCB(vcpu,rrs)[region];
+       check_xen_space_overlap("ptr_i", vadr, 1UL << log_range);
+
+       rr = PSCB(vcpu, rrs)[region];
        rid = rr & RR_RID_MASK;
 
        /* Purge TC  */
-       vcpu_purge_tr_entry(&PSCBX(vcpu,itlb));
+       vcpu_purge_tr_entry(&PSCBX(vcpu, itlb));
 
        /* Purge tr and recompute itr_regions.  */
        vcpu->arch.itr_regions = 0;
        for (trp = vcpu->arch.itrs, i = NITRS; i; i--, trp++)
-               if (vcpu_match_tr_entry_range (trp,rid, vadr, vadr+addr_range))
+               if (vcpu_match_tr_entry_range
+                   (trp, rid, vadr, vadr + addr_range))
                        vcpu_purge_tr_entry(trp);
                else if (trp->pte.p)
                        vcpu_quick_region_set(vcpu->arch.itr_regions,
                                              trp->vadr);
 
-       vcpu_flush_tlb_vhpt_range (vadr, log_range);
-
-       return IA64_NO_FAULT;
-}
+       vcpu_flush_tlb_vhpt_range(vadr, log_range);
+
+       return IA64_NO_FAULT;
+}
diff -r c5ddcf89f050 -r 78c494a16b95 xen/arch/ia64/xen/vhpt.c
--- a/xen/arch/ia64/xen/vhpt.c  Tue Oct 17 14:30:36 2006 -0600
+++ b/xen/arch/ia64/xen/vhpt.c  Tue Oct 17 15:43:41 2006 -0600
@@ -22,7 +22,7 @@
 #include <asm/vmmu.h>
 
 /* Defined in tlb.c  */
-extern void ia64_global_tlb_purge(UINT64 start, UINT64 end, UINT64 nbits);
+extern void ia64_global_tlb_purge(u64 start, u64 end, u64 nbits);
 
 extern long running_on_sim;
 
diff -r c5ddcf89f050 -r 78c494a16b95 xen/include/asm-ia64/dom_fw.h
--- a/xen/include/asm-ia64/dom_fw.h     Tue Oct 17 14:30:36 2006 -0600
+++ b/xen/include/asm-ia64/dom_fw.h     Tue Oct 17 15:43:41 2006 -0600
@@ -180,7 +180,7 @@
 
 #define EFI_MEMDESC_VERSION            1
 
-extern struct ia64_pal_retval xen_pal_emulator(UINT64, u64, u64, u64);
+extern struct ia64_pal_retval xen_pal_emulator(u64, u64, u64, u64);
 extern struct sal_ret_values sal_emulator (long index, unsigned long in1, unsigned long in2, unsigned long in3, unsigned long in4, unsigned long in5, unsigned long in6, unsigned long in7);
 extern struct ia64_pal_retval pal_emulator_static (unsigned long);
 extern efi_status_t efi_emulator (struct pt_regs *regs, unsigned long *fault);
diff -r c5ddcf89f050 -r 78c494a16b95 xen/include/asm-ia64/privop.h
--- a/xen/include/asm-ia64/privop.h     Tue Oct 17 14:30:36 2006 -0600
+++ b/xen/include/asm-ia64/privop.h     Tue Oct 17 15:43:41 2006 -0600
@@ -4,9 +4,9 @@
 #include <asm/ia64_int.h>
 #include <asm/vcpu.h>
 
-extern IA64FAULT priv_emulate(VCPU *vcpu, REGS *regs, UINT64 isr);
+extern IA64FAULT priv_emulate(VCPU *vcpu, REGS *regs, u64 isr);
 
-extern void privify_memory(void *start, UINT64 len);
+extern void privify_memory(void *start, u64 len);
 
 extern int ia64_hyperprivop(unsigned long iim, REGS *regs);
 
diff -r c5ddcf89f050 -r 78c494a16b95 xen/include/asm-ia64/vcpu.h
--- a/xen/include/asm-ia64/vcpu.h       Tue Oct 17 14:30:36 2006 -0600
+++ b/xen/include/asm-ia64/vcpu.h       Tue Oct 17 15:43:41 2006 -0600
@@ -10,195 +10,194 @@
 #include <asm/ia64_int.h>
 #include <xen/types.h>
 #include <public/xen.h>
-typedef        unsigned long UINT64;
-typedef        unsigned int UINT;
-typedef        int BOOLEAN;
+typedef int BOOLEAN;
 struct vcpu;
-typedef        struct vcpu VCPU;
+typedef struct vcpu VCPU;
 typedef cpu_user_regs_t REGS;
 extern u64 cycle_to_ns(u64 cycle);
 
 /* Note: PSCB stands for Privilegied State Communication Block.  */
 #define VCPU(_v,_x)    (_v->arch.privregs->_x)
-#define PSCB(_v,_x) VCPU(_v,_x)
-#define PSCBX(_v,_x) (_v->arch._x)
+#define PSCB(_v,_x)    VCPU(_v,_x)
+#define PSCBX(_v,_x)   (_v->arch._x)
 
 #define SPURIOUS_VECTOR 0xf
 
 /* general registers */
-extern UINT64 vcpu_get_gr(VCPU *vcpu, unsigned long reg);
-extern IA64FAULT vcpu_get_gr_nat(VCPU *vcpu, unsigned long reg, UINT64 *val);
-extern IA64FAULT vcpu_set_gr(VCPU *vcpu, unsigned long reg, UINT64 value, int nat);
-extern IA64FAULT vcpu_get_fpreg(VCPU *vcpu, unsigned long reg, struct ia64_fpreg *val);
-
-extern IA64FAULT vcpu_set_fpreg(VCPU *vcpu, unsigned long reg, struct ia64_fpreg *val);
+extern u64 vcpu_get_gr(VCPU * vcpu, unsigned long reg);
+extern IA64FAULT vcpu_get_gr_nat(VCPU * vcpu, unsigned long reg, u64 * val);
+extern IA64FAULT vcpu_set_gr(VCPU * vcpu, unsigned long reg, u64 value,
+                             int nat);
+extern IA64FAULT vcpu_get_fpreg(VCPU * vcpu, unsigned long reg,
+                                struct ia64_fpreg *val);
+
+extern IA64FAULT vcpu_set_fpreg(VCPU * vcpu, unsigned long reg,
+                                struct ia64_fpreg *val);
 
 /* application registers */
-extern void vcpu_load_kernel_regs(VCPU *vcpu);
-extern IA64FAULT vcpu_set_ar(VCPU *vcpu, UINT64 reg, UINT64 val);
-extern IA64FAULT vcpu_get_ar(VCPU *vcpu, UINT64 reg, UINT64 *val);
+extern void vcpu_load_kernel_regs(VCPU * vcpu);
+extern IA64FAULT vcpu_set_ar(VCPU * vcpu, u64 reg, u64 val);
+extern IA64FAULT vcpu_get_ar(VCPU * vcpu, u64 reg, u64 * val);
 /* psr */
-extern BOOLEAN vcpu_get_psr_ic(VCPU *vcpu);
-extern UINT64 vcpu_get_ipsr_int_state(VCPU *vcpu,UINT64 prevpsr);
-extern IA64FAULT vcpu_get_psr(VCPU *vcpu, UINT64 *pval);
-extern IA64FAULT vcpu_reset_psr_sm(VCPU *vcpu, UINT64 imm);
-extern IA64FAULT vcpu_set_psr_sm(VCPU *vcpu, UINT64 imm);
-extern IA64FAULT vcpu_set_psr_l(VCPU *vcpu, UINT64 val);
-extern IA64FAULT vcpu_set_psr_i(VCPU *vcpu);
-extern IA64FAULT vcpu_reset_psr_dt(VCPU *vcpu);
-extern IA64FAULT vcpu_set_psr_dt(VCPU *vcpu);
+extern BOOLEAN vcpu_get_psr_ic(VCPU * vcpu);
+extern u64 vcpu_get_ipsr_int_state(VCPU * vcpu, u64 prevpsr);
+extern IA64FAULT vcpu_get_psr(VCPU * vcpu, u64 * pval);
+extern IA64FAULT vcpu_reset_psr_sm(VCPU * vcpu, u64 imm);
+extern IA64FAULT vcpu_set_psr_sm(VCPU * vcpu, u64 imm);
+extern IA64FAULT vcpu_set_psr_l(VCPU * vcpu, u64 val);
+extern IA64FAULT vcpu_set_psr_i(VCPU * vcpu);
+extern IA64FAULT vcpu_reset_psr_dt(VCPU * vcpu);
+extern IA64FAULT vcpu_set_psr_dt(VCPU * vcpu);
 /* control registers */
-extern IA64FAULT vcpu_set_dcr(VCPU *vcpu, UINT64 val);
-extern IA64FAULT vcpu_set_itm(VCPU *vcpu, UINT64 val);
-extern IA64FAULT vcpu_set_iva(VCPU *vcpu, UINT64 val);
-extern IA64FAULT vcpu_set_pta(VCPU *vcpu, UINT64 val);
-extern IA64FAULT vcpu_set_ipsr(VCPU *vcpu, UINT64 val);
-extern IA64FAULT vcpu_set_isr(VCPU *vcpu, UINT64 val);
-extern IA64FAULT vcpu_set_iip(VCPU *vcpu, UINT64 val);
-extern IA64FAULT vcpu_set_ifa(VCPU *vcpu, UINT64 val);
-extern IA64FAULT vcpu_set_itir(VCPU *vcpu, UINT64 val);
-extern IA64FAULT vcpu_set_iipa(VCPU *vcpu, UINT64 val);
-extern IA64FAULT vcpu_set_ifs(VCPU *vcpu, UINT64 val);
-extern IA64FAULT vcpu_set_iim(VCPU *vcpu, UINT64 val);
-extern IA64FAULT vcpu_set_iha(VCPU *vcpu, UINT64 val);
-extern IA64FAULT vcpu_set_lid(VCPU *vcpu, UINT64 val);
-extern IA64FAULT vcpu_set_tpr(VCPU *vcpu, UINT64 val);
-extern IA64FAULT vcpu_set_eoi(VCPU *vcpu, UINT64 val);
-extern IA64FAULT vcpu_set_lrr0(VCPU *vcpu, UINT64 val);
-extern IA64FAULT vcpu_set_lrr1(VCPU *vcpu, UINT64 val);
-extern IA64FAULT vcpu_get_dcr(VCPU *vcpu, UINT64 *pval);
-extern IA64FAULT vcpu_get_itm(VCPU *vcpu, UINT64 *pval);
-extern IA64FAULT vcpu_get_iva(VCPU *vcpu, UINT64 *pval);
-extern IA64FAULT vcpu_get_pta(VCPU *vcpu, UINT64 *pval);
-extern IA64FAULT vcpu_get_ipsr(VCPU *vcpu, UINT64 *pval);
-extern IA64FAULT vcpu_get_isr(VCPU *vcpu, UINT64 *pval);
-extern IA64FAULT vcpu_get_iip(VCPU *vcpu, UINT64 *pval);
-extern IA64FAULT vcpu_increment_iip(VCPU *vcpu);
-extern IA64FAULT vcpu_get_ifa(VCPU *vcpu, UINT64 *pval);
-extern IA64FAULT vcpu_get_itir(VCPU *vcpu, UINT64 *pval);
-extern unsigned long vcpu_get_itir_on_fault(VCPU *vcpu, UINT64 ifa);
-extern IA64FAULT vcpu_get_iipa(VCPU *vcpu, UINT64 *pval);
-extern IA64FAULT vcpu_get_ifs(VCPU *vcpu, UINT64 *pval);
-extern IA64FAULT vcpu_get_iim(VCPU *vcpu, UINT64 *pval);
-extern IA64FAULT vcpu_get_iha(VCPU *vcpu, UINT64 *pval);
-extern IA64FAULT vcpu_get_lid(VCPU *vcpu, UINT64 *pval);
-extern IA64FAULT vcpu_get_tpr(VCPU *vcpu, UINT64 *pval);
-extern IA64FAULT vcpu_get_irr0(VCPU *vcpu, UINT64 *pval);
-extern IA64FAULT vcpu_get_irr1(VCPU *vcpu, UINT64 *pval);
-extern IA64FAULT vcpu_get_irr2(VCPU *vcpu, UINT64 *pval);
-extern IA64FAULT vcpu_get_irr3(VCPU *vcpu, UINT64 *pval);
-extern IA64FAULT vcpu_get_lrr0(VCPU *vcpu, UINT64 *pval);
-extern IA64FAULT vcpu_get_lrr1(VCPU *vcpu, UINT64 *pval);
+extern IA64FAULT vcpu_set_dcr(VCPU * vcpu, u64 val);
+extern IA64FAULT vcpu_set_itm(VCPU * vcpu, u64 val);
+extern IA64FAULT vcpu_set_iva(VCPU * vcpu, u64 val);
+extern IA64FAULT vcpu_set_pta(VCPU * vcpu, u64 val);
+extern IA64FAULT vcpu_set_ipsr(VCPU * vcpu, u64 val);
+extern IA64FAULT vcpu_set_isr(VCPU * vcpu, u64 val);
+extern IA64FAULT vcpu_set_iip(VCPU * vcpu, u64 val);
+extern IA64FAULT vcpu_set_ifa(VCPU * vcpu, u64 val);
+extern IA64FAULT vcpu_set_itir(VCPU * vcpu, u64 val);
+extern IA64FAULT vcpu_set_iipa(VCPU * vcpu, u64 val);
+extern IA64FAULT vcpu_set_ifs(VCPU * vcpu, u64 val);
+extern IA64FAULT vcpu_set_iim(VCPU * vcpu, u64 val);
+extern IA64FAULT vcpu_set_iha(VCPU * vcpu, u64 val);
+extern IA64FAULT vcpu_set_lid(VCPU * vcpu, u64 val);
+extern IA64FAULT vcpu_set_tpr(VCPU * vcpu, u64 val);
+extern IA64FAULT vcpu_set_eoi(VCPU * vcpu, u64 val);
+extern IA64FAULT vcpu_set_lrr0(VCPU * vcpu, u64 val);
+extern IA64FAULT vcpu_set_lrr1(VCPU * vcpu, u64 val);
+extern IA64FAULT vcpu_get_dcr(VCPU * vcpu, u64 * pval);
+extern IA64FAULT vcpu_get_itm(VCPU * vcpu, u64 * pval);
+extern IA64FAULT vcpu_get_iva(VCPU * vcpu, u64 * pval);
+extern IA64FAULT vcpu_get_pta(VCPU * vcpu, u64 * pval);
+extern IA64FAULT vcpu_get_ipsr(VCPU * vcpu, u64 * pval);
+extern IA64FAULT vcpu_get_isr(VCPU * vcpu, u64 * pval);
+extern IA64FAULT vcpu_get_iip(VCPU * vcpu, u64 * pval);
+extern IA64FAULT vcpu_increment_iip(VCPU * vcpu);
+extern IA64FAULT vcpu_get_ifa(VCPU * vcpu, u64 * pval);
+extern IA64FAULT vcpu_get_itir(VCPU * vcpu, u64 * pval);
+extern unsigned long vcpu_get_itir_on_fault(VCPU * vcpu, u64 ifa);
+extern IA64FAULT vcpu_get_iipa(VCPU * vcpu, u64 * pval);
+extern IA64FAULT vcpu_get_ifs(VCPU * vcpu, u64 * pval);
+extern IA64FAULT vcpu_get_iim(VCPU * vcpu, u64 * pval);
+extern IA64FAULT vcpu_get_iha(VCPU * vcpu, u64 * pval);
+extern IA64FAULT vcpu_get_lid(VCPU * vcpu, u64 * pval);
+extern IA64FAULT vcpu_get_tpr(VCPU * vcpu, u64 * pval);
+extern IA64FAULT vcpu_get_irr0(VCPU * vcpu, u64 * pval);
+extern IA64FAULT vcpu_get_irr1(VCPU * vcpu, u64 * pval);
+extern IA64FAULT vcpu_get_irr2(VCPU * vcpu, u64 * pval);
+extern IA64FAULT vcpu_get_irr3(VCPU * vcpu, u64 * pval);
+extern IA64FAULT vcpu_get_lrr0(VCPU * vcpu, u64 * pval);
+extern IA64FAULT vcpu_get_lrr1(VCPU * vcpu, u64 * pval);
 /* interrupt registers */
-extern void vcpu_pend_unspecified_interrupt(VCPU *vcpu);
-extern UINT64 vcpu_check_pending_interrupts(VCPU *vcpu);
-extern IA64FAULT vcpu_get_itv(VCPU *vcpu,UINT64 *pval);
-extern IA64FAULT vcpu_get_pmv(VCPU *vcpu,UINT64 *pval);
-extern IA64FAULT vcpu_get_cmcv(VCPU *vcpu,UINT64 *pval);
-extern IA64FAULT vcpu_get_ivr(VCPU *vcpu, UINT64 *pval);
-extern IA64FAULT vcpu_set_itv(VCPU *vcpu, UINT64 val);
-extern IA64FAULT vcpu_set_pmv(VCPU *vcpu, UINT64 val);
-extern IA64FAULT vcpu_set_cmcv(VCPU *vcpu, UINT64 val);
+extern void vcpu_pend_unspecified_interrupt(VCPU * vcpu);
+extern u64 vcpu_check_pending_interrupts(VCPU * vcpu);
+extern IA64FAULT vcpu_get_itv(VCPU * vcpu, u64 * pval);
+extern IA64FAULT vcpu_get_pmv(VCPU * vcpu, u64 * pval);
+extern IA64FAULT vcpu_get_cmcv(VCPU * vcpu, u64 * pval);
+extern IA64FAULT vcpu_get_ivr(VCPU * vcpu, u64 * pval);
+extern IA64FAULT vcpu_set_itv(VCPU * vcpu, u64 val);
+extern IA64FAULT vcpu_set_pmv(VCPU * vcpu, u64 val);
+extern IA64FAULT vcpu_set_cmcv(VCPU * vcpu, u64 val);
 /* interval timer registers */
-extern IA64FAULT vcpu_set_itc(VCPU *vcpu,UINT64 val);
-extern UINT64 vcpu_timer_pending_early(VCPU *vcpu);
+extern IA64FAULT vcpu_set_itc(VCPU * vcpu, u64 val);
+extern u64 vcpu_timer_pending_early(VCPU * vcpu);
 /* debug breakpoint registers */
-extern IA64FAULT vcpu_set_ibr(VCPU *vcpu,UINT64 reg,UINT64 val);
-extern IA64FAULT vcpu_set_dbr(VCPU *vcpu,UINT64 reg,UINT64 val);
-extern IA64FAULT vcpu_get_ibr(VCPU *vcpu,UINT64 reg,UINT64 *pval);
-extern IA64FAULT vcpu_get_dbr(VCPU *vcpu,UINT64 reg,UINT64 *pval);
+extern IA64FAULT vcpu_set_ibr(VCPU * vcpu, u64 reg, u64 val);
+extern IA64FAULT vcpu_set_dbr(VCPU * vcpu, u64 reg, u64 val);
+extern IA64FAULT vcpu_get_ibr(VCPU * vcpu, u64 reg, u64 * pval);
+extern IA64FAULT vcpu_get_dbr(VCPU * vcpu, u64 reg, u64 * pval);
 /* performance monitor registers */
-extern IA64FAULT vcpu_set_pmc(VCPU *vcpu,UINT64 reg,UINT64 val);
-extern IA64FAULT vcpu_set_pmd(VCPU *vcpu,UINT64 reg,UINT64 val);
-extern IA64FAULT vcpu_get_pmc(VCPU *vcpu,UINT64 reg,UINT64 *pval);
-extern IA64FAULT vcpu_get_pmd(VCPU *vcpu,UINT64 reg,UINT64 *pval);
+extern IA64FAULT vcpu_set_pmc(VCPU * vcpu, u64 reg, u64 val);
+extern IA64FAULT vcpu_set_pmd(VCPU * vcpu, u64 reg, u64 val);
+extern IA64FAULT vcpu_get_pmc(VCPU * vcpu, u64 reg, u64 * pval);
+extern IA64FAULT vcpu_get_pmd(VCPU * vcpu, u64 reg, u64 * pval);
 /* banked general registers */
-extern IA64FAULT vcpu_bsw0(VCPU *vcpu);
-extern IA64FAULT vcpu_bsw1(VCPU *vcpu);
+extern IA64FAULT vcpu_bsw0(VCPU * vcpu);
+extern IA64FAULT vcpu_bsw1(VCPU * vcpu);
 /* region registers */
-extern IA64FAULT vcpu_set_rr(VCPU *vcpu,UINT64 reg,UINT64 val);
-extern IA64FAULT vcpu_get_rr(VCPU *vcpu,UINT64 reg,UINT64 *pval);
-extern IA64FAULT vcpu_get_rr_ve(VCPU *vcpu,UINT64 vadr);
+extern IA64FAULT vcpu_set_rr(VCPU * vcpu, u64 reg, u64 val);
+extern IA64FAULT vcpu_get_rr(VCPU * vcpu, u64 reg, u64 * pval);
+extern IA64FAULT vcpu_get_rr_ve(VCPU * vcpu, u64 vadr);
 /* protection key registers */
-extern IA64FAULT vcpu_get_pkr(VCPU *vcpu, UINT64 reg, UINT64 *pval);
-extern IA64FAULT vcpu_set_pkr(VCPU *vcpu, UINT64 reg, UINT64 val);
-extern IA64FAULT vcpu_tak(VCPU *vcpu, UINT64 vadr, UINT64 *key);
+extern IA64FAULT vcpu_get_pkr(VCPU * vcpu, u64 reg, u64 * pval);
+extern IA64FAULT vcpu_set_pkr(VCPU * vcpu, u64 reg, u64 val);
+extern IA64FAULT vcpu_tak(VCPU * vcpu, u64 vadr, u64 * key);
 /* TLB */
-static inline void vcpu_purge_tr_entry(TR_ENTRY *trp)
+static inline void vcpu_purge_tr_entry(TR_ENTRY * trp)
 {
        trp->pte.val = 0;
 }
-extern IA64FAULT vcpu_itr_d(VCPU *vcpu, UINT64 slot, UINT64 padr,
-               UINT64 itir, UINT64 ifa);
-extern IA64FAULT vcpu_itr_i(VCPU *vcpu, UINT64 slot, UINT64 padr,
-               UINT64 itir, UINT64 ifa);
-extern IA64FAULT vcpu_itc_d(VCPU *vcpu, UINT64 padr, UINT64 itir, UINT64 ifa);
-extern IA64FAULT vcpu_itc_i(VCPU *vcpu, UINT64 padr, UINT64 itir, UINT64 ifa);
-extern IA64FAULT vcpu_ptc_l(VCPU *vcpu, UINT64 vadr, UINT64 log_range);
-extern IA64FAULT vcpu_ptc_e(VCPU *vcpu, UINT64 vadr);
-extern IA64FAULT vcpu_ptc_g(VCPU *vcpu, UINT64 vadr, UINT64 addr_range);
-extern IA64FAULT vcpu_ptc_ga(VCPU *vcpu, UINT64 vadr, UINT64 addr_range);
-extern IA64FAULT vcpu_ptr_d(VCPU *vcpu,UINT64 vadr, UINT64 log_range);
-extern IA64FAULT vcpu_ptr_i(VCPU *vcpu,UINT64 vadr, UINT64 log_range);
+extern IA64FAULT vcpu_itr_d(VCPU * vcpu, u64 slot, u64 padr, u64 itir, u64 ifa);
+extern IA64FAULT vcpu_itr_i(VCPU * vcpu, u64 slot, u64 padr, u64 itir, u64 ifa);
+extern IA64FAULT vcpu_itc_d(VCPU * vcpu, u64 padr, u64 itir, u64 ifa);
+extern IA64FAULT vcpu_itc_i(VCPU * vcpu, u64 padr, u64 itir, u64 ifa);
+extern IA64FAULT vcpu_ptc_l(VCPU * vcpu, u64 vadr, u64 log_range);
+extern IA64FAULT vcpu_ptc_e(VCPU * vcpu, u64 vadr);
+extern IA64FAULT vcpu_ptc_g(VCPU * vcpu, u64 vadr, u64 addr_range);
+extern IA64FAULT vcpu_ptc_ga(VCPU * vcpu, u64 vadr, u64 addr_range);
+extern IA64FAULT vcpu_ptr_d(VCPU * vcpu, u64 vadr, u64 log_range);
+extern IA64FAULT vcpu_ptr_i(VCPU * vcpu, u64 vadr, u64 log_range);
 union U_IA64_BUNDLE;
-extern int vcpu_get_domain_bundle(VCPU *vcpu, REGS *regs, UINT64 gip, union U_IA64_BUNDLE *bundle);
-extern IA64FAULT vcpu_translate(VCPU *vcpu, UINT64 address, BOOLEAN is_data,
-                               UINT64 *pteval, UINT64 *itir, UINT64 *iha);
-extern IA64FAULT vcpu_tpa(VCPU *vcpu, UINT64 vadr, UINT64 *padr);
-extern IA64FAULT vcpu_force_inst_miss(VCPU *vcpu, UINT64 ifa);
-extern IA64FAULT vcpu_force_data_miss(VCPU *vcpu, UINT64 ifa);
-extern IA64FAULT vcpu_fc(VCPU *vcpu, UINT64 vadr);
+extern int vcpu_get_domain_bundle(VCPU * vcpu, REGS * regs, u64 gip,
+                                  union U_IA64_BUNDLE *bundle);
+extern IA64FAULT vcpu_translate(VCPU * vcpu, u64 address, BOOLEAN is_data,
+                                u64 * pteval, u64 * itir, u64 * iha);
+extern IA64FAULT vcpu_tpa(VCPU * vcpu, u64 vadr, u64 * padr);
+extern IA64FAULT vcpu_force_inst_miss(VCPU * vcpu, u64 ifa);
+extern IA64FAULT vcpu_force_data_miss(VCPU * vcpu, u64 ifa);
+extern IA64FAULT vcpu_fc(VCPU * vcpu, u64 vadr);
 /* misc */
-extern IA64FAULT vcpu_rfi(VCPU *vcpu);
-extern IA64FAULT vcpu_thash(VCPU *vcpu, UINT64 vadr, UINT64 *pval);
-extern IA64FAULT vcpu_cover(VCPU *vcpu);
-extern IA64FAULT vcpu_ttag(VCPU *vcpu, UINT64 vadr, UINT64 *padr);
-extern IA64FAULT vcpu_get_cpuid(VCPU *vcpu, UINT64 reg, UINT64 *pval);
-
-extern void vcpu_pend_interrupt(VCPU *vcpu, UINT64 vector);
-extern void vcpu_pend_timer(VCPU *vcpu);
-extern void vcpu_poke_timer(VCPU *vcpu);
-extern void vcpu_set_next_timer(VCPU *vcpu);
-extern BOOLEAN vcpu_timer_expired(VCPU *vcpu);
-extern UINT64 vcpu_deliverable_interrupts(VCPU *vcpu);
+extern IA64FAULT vcpu_rfi(VCPU * vcpu);
+extern IA64FAULT vcpu_thash(VCPU * vcpu, u64 vadr, u64 * pval);
+extern IA64FAULT vcpu_cover(VCPU * vcpu);
+extern IA64FAULT vcpu_ttag(VCPU * vcpu, u64 vadr, u64 * padr);
+extern IA64FAULT vcpu_get_cpuid(VCPU * vcpu, u64 reg, u64 * pval);
+
+extern void vcpu_pend_interrupt(VCPU * vcpu, u64 vector);
+extern void vcpu_pend_timer(VCPU * vcpu);
+extern void vcpu_poke_timer(VCPU * vcpu);
+extern void vcpu_set_next_timer(VCPU * vcpu);
+extern BOOLEAN vcpu_timer_expired(VCPU * vcpu);
+extern u64 vcpu_deliverable_interrupts(VCPU * vcpu);
 struct p2m_entry;
-extern void vcpu_itc_no_srlz(VCPU *vcpu, UINT64, UINT64, UINT64, UINT64, UINT64, struct p2m_entry*);
-extern UINT64 vcpu_get_tmp(VCPU *, UINT64);
-extern void vcpu_set_tmp(VCPU *, UINT64, UINT64);
-
-extern IA64FAULT vcpu_set_dtr(VCPU *vcpu, u64 slot,
+extern void vcpu_itc_no_srlz(VCPU * vcpu, u64, u64, u64, u64, u64,
+                             struct p2m_entry *);
+extern u64 vcpu_get_tmp(VCPU *, u64);
+extern void vcpu_set_tmp(VCPU *, u64, u64);
+
+extern IA64FAULT vcpu_set_dtr(VCPU * vcpu, u64 slot,
                               u64 pte, u64 itir, u64 ifa, u64 rid);
-extern IA64FAULT vcpu_set_itr(VCPU *vcpu, u64 slot,
+extern IA64FAULT vcpu_set_itr(VCPU * vcpu, u64 slot,
                               u64 pte, u64 itir, u64 ifa, u64 rid);
 
 /* Initialize vcpu regs.  */
-extern void vcpu_init_regs (struct vcpu *v);
-
-static inline UINT64
-itir_ps(UINT64 itir)
-{
-    return ((itir >> 2) & 0x3f);
-}
-
-static inline UINT64
-itir_mask(UINT64 itir)
-{
-    return (~((1UL << itir_ps(itir)) - 1));
-}
-
-static inline s64
-vcpu_get_next_timer_ns(VCPU *vcpu)
-{
-    s64 vcpu_get_next_timer_ns;
-    u64 d = PSCBX(vcpu, domain_itm);
-    u64 now = ia64_get_itc();
-
-    if (d > now)
-        vcpu_get_next_timer_ns = cycle_to_ns(d - now) + NOW();
-    else
-        vcpu_get_next_timer_ns = cycle_to_ns(local_cpu_data->itm_delta) + NOW();
-
-    return vcpu_get_next_timer_ns;
+extern void vcpu_init_regs(struct vcpu *v);
+
+static inline u64 itir_ps(u64 itir)
+{
+       return ((itir >> 2) & 0x3f);
+}
+
+static inline u64 itir_mask(u64 itir)
+{
+       return (~((1UL << itir_ps(itir)) - 1));
+}
+
+static inline s64 vcpu_get_next_timer_ns(VCPU * vcpu)
+{
+       s64 vcpu_get_next_timer_ns;
+       u64 d = PSCBX(vcpu, domain_itm);
+       u64 now = ia64_get_itc();
+
+       if (d > now)
+               vcpu_get_next_timer_ns = cycle_to_ns(d - now) + NOW();
+       else
+               vcpu_get_next_timer_ns =
+                   cycle_to_ns(local_cpu_data->itm_delta) + NOW();
+
+       return vcpu_get_next_timer_ns;
 }
 
 #define verbose(a...) do {if (vcpu_verbose) printf(a);} while(0)
@@ -209,5 +208,4 @@ vcpu_get_next_timer_ns(VCPU *vcpu)
 #define vcpu_quick_region_set(_tr_regions,_ifa)             \
     do {_tr_regions |= (1 << ((unsigned long)_ifa >> 61)); } while (0)
 
-
 #endif
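Aside: the point of this hunk is that UINT64 was never anything but a second name for the type u64 already provides, so the rename cannot change any size or signature. A short sketch, assuming a 64-bit target where Xen's u64 is unsigned long (illustrative only, not part of the changeset):

/* Sketch only: UINT64 aliased the exact type u64 already names. */
typedef unsigned long u64;	/* assumption: 64-bit target, as in Xen's type headers */
typedef unsigned long UINT64;	/* the alias this patch deletes */

_Static_assert(sizeof(u64) == sizeof(UINT64), "pure rename, no ABI change");

int main(void)
{
	return 0;
}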
diff -r c5ddcf89f050 -r 78c494a16b95 xen/include/asm-ia64/vmx_pal_vsa.h
--- a/xen/include/asm-ia64/vmx_pal_vsa.h        Tue Oct 17 14:30:36 2006 -0600
+++ b/xen/include/asm-ia64/vmx_pal_vsa.h        Tue Oct 17 15:43:41 2006 -0600
@@ -26,10 +26,9 @@
 /* PAL virtualization services */
 
 #ifndef __ASSEMBLY__
-extern UINT64 ia64_call_vsa(UINT64 proc,UINT64 arg1, UINT64 arg2,
-                   UINT64 arg3, UINT64 arg4, UINT64 arg5,
-                   UINT64 arg6, UINT64 arg7);
-extern UINT64 __vsa_base;
+extern u64 ia64_call_vsa(u64 proc, u64 arg1, u64 arg2, u64 arg3,
+                         u64 arg4, u64 arg5, u64 arg6, u64 arg7);
+extern u64 __vsa_base;
 #endif  /* __ASSEMBLY__ */
 
 #define PAL_VPS_RESUME_NORMAL           0x0000
diff -r c5ddcf89f050 -r 78c494a16b95 xen/include/asm-ia64/vmx_phy_mode.h
--- a/xen/include/asm-ia64/vmx_phy_mode.h       Tue Oct 17 14:30:36 2006 -0600
+++ b/xen/include/asm-ia64/vmx_phy_mode.h       Tue Oct 17 15:43:41 2006 -0600
@@ -90,7 +90,7 @@ extern void switch_to_physical_rid(VCPU 
 extern void switch_to_physical_rid(VCPU *);
 extern void switch_to_virtual_rid(VCPU *vcpu);
 extern void switch_mm_mode(VCPU *vcpu, IA64_PSR old_psr, IA64_PSR new_psr);
-extern void stlb_phys_lookup(VCPU *vcpu, UINT64 paddr, UINT64 type);
+extern void stlb_phys_lookup(VCPU *vcpu, u64 paddr, u64 type);
 extern void check_mm_mode_switch (VCPU *vcpu,  IA64_PSR old_psr, IA64_PSR new_psr);
 extern void prepare_if_physical_mode(VCPU *vcpu);
 extern void recover_if_physical_mode(VCPU *vcpu);
@@ -120,9 +120,4 @@ extern void physical_tlb_miss(VCPU *vcpu
 #define GUEST_VIRT  1   /* Guest in virtual mode */
 #define GUEST_PHYS  2   /* Guest in physical mode, requiring emulation */
 
-
-
 #endif /* _PHY_MODE_H_ */
-
-
-
diff -r c5ddcf89f050 -r 78c494a16b95 xen/include/asm-ia64/vmx_vcpu.h
--- a/xen/include/asm-ia64/vmx_vcpu.h   Tue Oct 17 14:30:36 2006 -0600
+++ b/xen/include/asm-ia64/vmx_vcpu.h   Tue Oct 17 15:43:41 2006 -0600
@@ -1,4 +1,4 @@
-/* -*-  Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
+/* -*-  Mode:C; c-basic-offset:8; tab-width:8; indent-tabs-mode:nil -*- */
 /*
  * vmx_vcpu.h:
  * Copyright (c) 2005, Intel Corporation.
@@ -23,7 +23,6 @@
 #ifndef _XEN_IA64_VMX_VCPU_H
 #define _XEN_IA64_VMX_VCPU_H
 
-
 #include <xen/sched.h>
 #include <asm/ia64_int.h>
 #include <asm/vmx_vpd.h>
@@ -33,464 +32,438 @@
 #include <asm/types.h>
 #include <asm/vcpu.h>
 
-#define VRN_SHIFT    61
-#define VRN0    0x0UL
-#define VRN1    0x1UL
-#define VRN2    0x2UL
-#define VRN3    0x3UL
-#define VRN4    0x4UL
-#define VRN5    0x5UL
-#define VRN6    0x6UL
-#define VRN7    0x7UL
+#define VRN_SHIFT      61
+#define VRN0           0x0UL
+#define VRN1           0x1UL
+#define VRN2           0x2UL
+#define VRN3           0x3UL
+#define VRN4           0x4UL
+#define VRN5           0x5UL
+#define VRN6           0x6UL
+#define VRN7           0x7UL
 // for vlsapic
-#define  VLSAPIC_INSVC(vcpu, i) ((vcpu)->arch.insvc[i])
+#define VLSAPIC_INSVC(vcpu, i) ((vcpu)->arch.insvc[i])
 
 #define VMX(x,y)  ((x)->arch.arch_vmx.y)
 
-
-#define VMM_RR_SHIFT    20
-#define VMM_RR_MASK     ((1UL<<VMM_RR_SHIFT)-1)
-
-extern u64 indirect_reg_igfld_MASK ( int type, int index, u64 value);
-extern u64 cr_igfld_mask (int index, u64 value);
-extern int check_indirect_reg_rsv_fields ( int type, int index, u64 value );
-extern u64 set_isr_ei_ni (VCPU *vcpu);
-extern u64 set_isr_for_na_inst(VCPU *vcpu, int op);
-
+#define VMM_RR_SHIFT   20
+#define VMM_RR_MASK    ((1UL<<VMM_RR_SHIFT)-1)
+
+extern u64 indirect_reg_igfld_MASK(int type, int index, u64 value);
+extern u64 cr_igfld_mask(int index, u64 value);
+extern int check_indirect_reg_rsv_fields(int type, int index, u64 value);
+extern u64 set_isr_ei_ni(VCPU * vcpu);
+extern u64 set_isr_for_na_inst(VCPU * vcpu, int op);
 
 /* next all for VTI domain APIs definition */
-extern void vmx_vcpu_set_psr(VCPU *vcpu, unsigned long value);
-extern UINT64 vmx_vcpu_sync_mpsr(UINT64 mipsr, UINT64 value);
-extern void vmx_vcpu_set_psr_sync_mpsr(VCPU * vcpu, UINT64 value);
-extern IA64FAULT vmx_vcpu_cover(VCPU *vcpu);
-extern IA64FAULT vmx_vcpu_set_rr(VCPU *vcpu, UINT64 reg, UINT64 val);
-extern IA64FAULT vmx_vcpu_get_pkr(VCPU *vcpu, UINT64 reg, UINT64 *pval);
-IA64FAULT vmx_vcpu_set_pkr(VCPU *vcpu, UINT64 reg, UINT64 val);
-extern IA64FAULT vmx_vcpu_itc_i(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa);
-extern IA64FAULT vmx_vcpu_itc_d(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa);
-extern IA64FAULT vmx_vcpu_itr_i(VCPU *vcpu, UINT64 slot, UINT64 pte, UINT64 itir, UINT64 ifa);
-extern IA64FAULT vmx_vcpu_itr_d(VCPU *vcpu, UINT64 slot, UINT64 pte, UINT64 itir, UINT64 ifa);
-extern IA64FAULT vmx_vcpu_ptr_d(VCPU *vcpu,UINT64 vadr,UINT64 ps);
-extern IA64FAULT vmx_vcpu_ptr_i(VCPU *vcpu,UINT64 vadr,UINT64 ps);
-extern IA64FAULT vmx_vcpu_ptc_l(VCPU *vcpu, UINT64 vadr, UINT64 ps);
-extern IA64FAULT vmx_vcpu_ptc_e(VCPU *vcpu, UINT64 vadr);
-extern IA64FAULT vmx_vcpu_ptc_g(VCPU *vcpu, UINT64 vadr, UINT64 ps);
-extern IA64FAULT vmx_vcpu_ptc_ga(VCPU *vcpu,UINT64 vadr,UINT64 ps);
-extern IA64FAULT vmx_vcpu_thash(VCPU *vcpu, UINT64 vadr, UINT64 *pval);
-extern u64 vmx_vcpu_get_itir_on_fault(VCPU *vcpu, u64 ifa);
-extern IA64FAULT vmx_vcpu_ttag(VCPU *vcpu, UINT64 vadr, UINT64 *pval);
-extern IA64FAULT vmx_vcpu_tpa(VCPU *vcpu, UINT64 vadr, UINT64 *padr);
-extern IA64FAULT vmx_vcpu_tak(VCPU *vcpu, UINT64 vadr, UINT64 *key);
-extern IA64FAULT vmx_vcpu_rfi(VCPU *vcpu);
-extern UINT64 vmx_vcpu_get_psr(VCPU *vcpu);
-extern IA64FAULT vmx_vcpu_get_bgr(VCPU *vcpu, unsigned int reg, UINT64 *val);
-extern IA64FAULT vmx_vcpu_set_bgr(VCPU *vcpu, unsigned int reg, u64 val,int nat);
+extern void vmx_vcpu_set_psr(VCPU * vcpu, unsigned long value);
+extern u64 vmx_vcpu_sync_mpsr(u64 mipsr, u64 value);
+extern void vmx_vcpu_set_psr_sync_mpsr(VCPU * vcpu, u64 value);
+extern IA64FAULT vmx_vcpu_cover(VCPU * vcpu);
+extern IA64FAULT vmx_vcpu_set_rr(VCPU * vcpu, u64 reg, u64 val);
+extern IA64FAULT vmx_vcpu_get_pkr(VCPU * vcpu, u64 reg, u64 * pval);
+IA64FAULT vmx_vcpu_set_pkr(VCPU * vcpu, u64 reg, u64 val);
+extern IA64FAULT vmx_vcpu_itc_i(VCPU * vcpu, u64 pte, u64 itir, u64 ifa);
+extern IA64FAULT vmx_vcpu_itc_d(VCPU * vcpu, u64 pte, u64 itir, u64 ifa);
+extern IA64FAULT vmx_vcpu_itr_i(VCPU * vcpu, u64 slot, u64 pte, u64 itir,
+                                u64 ifa);
+extern IA64FAULT vmx_vcpu_itr_d(VCPU * vcpu, u64 slot, u64 pte, u64 itir,
+                                u64 ifa);
+extern IA64FAULT vmx_vcpu_ptr_d(VCPU * vcpu, u64 vadr, u64 ps);
+extern IA64FAULT vmx_vcpu_ptr_i(VCPU * vcpu, u64 vadr, u64 ps);
+extern IA64FAULT vmx_vcpu_ptc_l(VCPU * vcpu, u64 vadr, u64 ps);
+extern IA64FAULT vmx_vcpu_ptc_e(VCPU * vcpu, u64 vadr);
+extern IA64FAULT vmx_vcpu_ptc_g(VCPU * vcpu, u64 vadr, u64 ps);
+extern IA64FAULT vmx_vcpu_ptc_ga(VCPU * vcpu, u64 vadr, u64 ps);
+extern IA64FAULT vmx_vcpu_thash(VCPU * vcpu, u64 vadr, u64 * pval);
+extern u64 vmx_vcpu_get_itir_on_fault(VCPU * vcpu, u64 ifa);
+extern IA64FAULT vmx_vcpu_ttag(VCPU * vcpu, u64 vadr, u64 * pval);
+extern IA64FAULT vmx_vcpu_tpa(VCPU * vcpu, u64 vadr, u64 * padr);
+extern IA64FAULT vmx_vcpu_tak(VCPU * vcpu, u64 vadr, u64 * key);
+extern IA64FAULT vmx_vcpu_rfi(VCPU * vcpu);
+extern u64 vmx_vcpu_get_psr(VCPU * vcpu);
+extern IA64FAULT vmx_vcpu_get_bgr(VCPU * vcpu, unsigned int reg, u64 * val);
+extern IA64FAULT vmx_vcpu_set_bgr(VCPU * vcpu, unsigned int reg, u64 val,
+                                  int nat);
 #if 0
-extern IA64FAULT vmx_vcpu_get_gr(VCPU *vcpu, unsigned reg, UINT64 * val);
-extern IA64FAULT vmx_vcpu_set_gr(VCPU *vcpu, unsigned reg, u64 value, int nat);
-#endif
-extern IA64FAULT vmx_vcpu_reset_psr_sm(VCPU *vcpu, UINT64 imm24);
-extern IA64FAULT vmx_vcpu_set_psr_sm(VCPU *vcpu, UINT64 imm24);
-extern IA64FAULT vmx_vcpu_set_psr_l(VCPU *vcpu, UINT64 val);
-extern void vtm_init(VCPU *vcpu);
-extern uint64_t vtm_get_itc(VCPU *vcpu);
-extern void vtm_set_itc(VCPU *vcpu, uint64_t new_itc);
-extern void vtm_set_itv(VCPU *vcpu, uint64_t val);
-extern void vtm_set_itm(VCPU *vcpu, uint64_t val);
-extern void vtm_interruption_update(VCPU *vcpu, vtime_t* vtm);
+extern IA64FAULT vmx_vcpu_get_gr(VCPU * vcpu, unsigned reg, u64 * val);
+extern IA64FAULT vmx_vcpu_set_gr(VCPU * vcpu, unsigned reg, u64 value, int nat);
+#endif
+extern IA64FAULT vmx_vcpu_reset_psr_sm(VCPU * vcpu, u64 imm24);
+extern IA64FAULT vmx_vcpu_set_psr_sm(VCPU * vcpu, u64 imm24);
+extern IA64FAULT vmx_vcpu_set_psr_l(VCPU * vcpu, u64 val);
+extern void vtm_init(VCPU * vcpu);
+extern uint64_t vtm_get_itc(VCPU * vcpu);
+extern void vtm_set_itc(VCPU * vcpu, uint64_t new_itc);
+extern void vtm_set_itv(VCPU * vcpu, uint64_t val);
+extern void vtm_set_itm(VCPU * vcpu, uint64_t val);
+extern void vtm_interruption_update(VCPU * vcpu, vtime_t * vtm);
 //extern void vtm_domain_out(VCPU *vcpu);
 //extern void vtm_domain_in(VCPU *vcpu);
-extern void vlsapic_reset(VCPU *vcpu);
-extern int vmx_check_pending_irq(VCPU *vcpu);
-extern void guest_write_eoi(VCPU *vcpu);
-extern int is_unmasked_irq(VCPU *vcpu);
-extern uint64_t guest_read_vivr(VCPU *vcpu);
-extern void vmx_inject_vhpi(VCPU *vcpu, u8 vec);
-extern int vmx_vcpu_pend_interrupt(VCPU *vcpu, uint8_t vector);
-extern struct virtual_platform_def *vmx_vcpu_get_plat(VCPU *vcpu);
-extern void memread_p(VCPU *vcpu, u64 *src, u64 *dest, size_t s);
-extern void memread_v(VCPU *vcpu, thash_data_t *vtlb, u64 *src, u64 *dest, size_t s);
-extern void memwrite_v(VCPU *vcpu, thash_data_t *vtlb, u64 *src, u64 *dest, size_t s);
-extern void memwrite_p(VCPU *vcpu, u64 *src, u64 *dest, size_t s);
-extern void vcpu_load_kernel_regs(VCPU *vcpu);
-extern IA64FAULT vmx_vcpu_increment_iip(VCPU *vcpu);
-extern IA64FAULT vmx_vcpu_decrement_iip(VCPU *vcpu);
-extern void vmx_switch_rr7(unsigned long ,shared_info_t*,void *,void *,void *);
-
-extern void dtlb_fault (VCPU *vcpu, u64 vadr);
-extern void nested_dtlb (VCPU *vcpu);
-extern void alt_dtlb (VCPU *vcpu, u64 vadr);
-extern void dvhpt_fault (VCPU *vcpu, u64 vadr);
-extern void dnat_page_consumption (VCPU *vcpu, uint64_t vadr);
-extern void data_page_not_present(VCPU *vcpu, u64 vadr);
-extern void inst_page_not_present(VCPU *vcpu, u64 vadr);
-extern void data_access_rights(VCPU *vcpu, u64 vadr);
+extern void vlsapic_reset(VCPU * vcpu);
+extern int vmx_check_pending_irq(VCPU * vcpu);
+extern void guest_write_eoi(VCPU * vcpu);
+extern int is_unmasked_irq(VCPU * vcpu);
+extern uint64_t guest_read_vivr(VCPU * vcpu);
+extern void vmx_inject_vhpi(VCPU * vcpu, u8 vec);
+extern int vmx_vcpu_pend_interrupt(VCPU * vcpu, uint8_t vector);
+extern struct virtual_platform_def *vmx_vcpu_get_plat(VCPU * vcpu);
+extern void memread_p(VCPU * vcpu, u64 * src, u64 * dest, size_t s);
+extern void memread_v(VCPU * vcpu, thash_data_t * vtlb, u64 * src, u64 * dest,
+                      size_t s);
+extern void memwrite_v(VCPU * vcpu, thash_data_t * vtlb, u64 * src, u64 * dest,
+                       size_t s);
+extern void memwrite_p(VCPU * vcpu, u64 * src, u64 * dest, size_t s);
+extern void vcpu_load_kernel_regs(VCPU * vcpu);
+extern IA64FAULT vmx_vcpu_increment_iip(VCPU * vcpu);
+extern IA64FAULT vmx_vcpu_decrement_iip(VCPU * vcpu);
+extern void vmx_switch_rr7(unsigned long, shared_info_t *, void *, void *,
+                           void *);
+
+extern void dtlb_fault(VCPU * vcpu, u64 vadr);
+extern void nested_dtlb(VCPU * vcpu);
+extern void alt_dtlb(VCPU * vcpu, u64 vadr);
+extern void dvhpt_fault(VCPU * vcpu, u64 vadr);
+extern void dnat_page_consumption(VCPU * vcpu, uint64_t vadr);
+extern void data_page_not_present(VCPU * vcpu, u64 vadr);
+extern void inst_page_not_present(VCPU * vcpu, u64 vadr);
+extern void data_access_rights(VCPU * vcpu, u64 vadr);
 
 /**************************************************************************
  VCPU control register access routines
 **************************************************************************/
 
-static inline
-IA64FAULT vmx_vcpu_get_dcr(VCPU *vcpu, UINT64 *pval)
-{
-    *pval = VCPU(vcpu,dcr);
-    return (IA64_NO_FAULT);
-}
-
-static inline
-IA64FAULT vmx_vcpu_get_itm(VCPU *vcpu, UINT64 *pval)
-{
-    *pval = VCPU(vcpu,itm);
-    return (IA64_NO_FAULT);
-}
-
-static inline
-IA64FAULT vmx_vcpu_get_iva(VCPU *vcpu, UINT64 *pval)
-{
-    *pval = VCPU(vcpu,iva);
-    return (IA64_NO_FAULT);
-}
-static inline
-IA64FAULT vmx_vcpu_get_pta(VCPU *vcpu, UINT64 *pval)
-{
-    *pval = VCPU(vcpu,pta);
-    return (IA64_NO_FAULT);
-}
-
-static inline
-IA64FAULT vmx_vcpu_get_lid(VCPU *vcpu, UINT64 *pval)
-{
-    *pval = VCPU(vcpu,lid);
-    return (IA64_NO_FAULT);
-}
-static inline
-IA64FAULT vmx_vcpu_get_ivr(VCPU *vcpu, UINT64 *pval)
-{
-    *pval = guest_read_vivr(vcpu);
-    return (IA64_NO_FAULT);
-}
-static inline
-IA64FAULT vmx_vcpu_get_tpr(VCPU *vcpu, UINT64 *pval)
-{
-    *pval = VCPU(vcpu,tpr);
-    return (IA64_NO_FAULT);
-}
-static inline
-IA64FAULT vmx_vcpu_get_eoi(VCPU *vcpu, UINT64 *pval)
-{
-    *pval = 0L;  // reads of eoi always return 0
-    return (IA64_NO_FAULT);
-}
-static inline
-IA64FAULT vmx_vcpu_get_irr0(VCPU *vcpu, UINT64 *pval)
-{
-    *pval = VCPU(vcpu,irr[0]);
-    return (IA64_NO_FAULT);
-}
-static inline
-IA64FAULT vmx_vcpu_get_irr1(VCPU *vcpu, UINT64 *pval)
-{
-    *pval = VCPU(vcpu,irr[1]);
-    return (IA64_NO_FAULT);
-}
-static inline
-IA64FAULT vmx_vcpu_get_irr2(VCPU *vcpu, UINT64 *pval)
-{
-    *pval = VCPU(vcpu,irr[2]);
-    return (IA64_NO_FAULT);
-}
-static inline
-IA64FAULT vmx_vcpu_get_irr3(VCPU *vcpu, UINT64 *pval)
-{
-    *pval = VCPU(vcpu,irr[3]);
-    return (IA64_NO_FAULT);
-}
-static inline
-IA64FAULT vmx_vcpu_get_itv(VCPU *vcpu, UINT64 *pval)
-{
-    *pval = VCPU(vcpu,itv);
-    return (IA64_NO_FAULT);
-}
-static inline
-IA64FAULT vmx_vcpu_get_pmv(VCPU *vcpu, UINT64 *pval)
-{
-    *pval = VCPU(vcpu,pmv);
-    return (IA64_NO_FAULT);
-}
-static inline
-IA64FAULT vmx_vcpu_get_cmcv(VCPU *vcpu, UINT64 *pval)
-{
-    *pval = VCPU(vcpu,cmcv);
-    return (IA64_NO_FAULT);
-}
-static inline
-IA64FAULT vmx_vcpu_get_lrr0(VCPU *vcpu, UINT64 *pval)
-{
-    *pval = VCPU(vcpu,lrr0);
-    return (IA64_NO_FAULT);
-}
-static inline
-IA64FAULT vmx_vcpu_get_lrr1(VCPU *vcpu, UINT64 *pval)
-{
-    *pval = VCPU(vcpu,lrr1);
-    return (IA64_NO_FAULT);
-}
-static inline
-IA64FAULT
-vmx_vcpu_set_dcr(VCPU *vcpu, u64 val)
-{
-    u64 mdcr, mask;
-    VCPU(vcpu,dcr)=val;
-    /* All vDCR bits will go to mDCR, except for be/pp/dm bits */
-    mdcr = ia64_get_dcr();
-    /* Machine dcr.dm masked to handle guest ld.s on tr mapped page */
-    mask = IA64_DCR_BE | IA64_DCR_PP | IA64_DCR_DM;
-    mdcr = ( mdcr & mask ) | ( val & (~mask) );
-    ia64_set_dcr( mdcr);
-    VMX(vcpu, mdcr) = mdcr;
-    return IA64_NO_FAULT;
-}
-
-static inline
-IA64FAULT
-vmx_vcpu_set_itm(VCPU *vcpu, u64 val)
-{
-    vtm_set_itm(vcpu, val);
-    return IA64_NO_FAULT;
-}
-static inline
-IA64FAULT
-vmx_vcpu_set_iva(VCPU *vcpu, u64 val)
-{
-    VCPU(vcpu,iva)=val;
-    return IA64_NO_FAULT;
-}
-
-static inline
-IA64FAULT
-vmx_vcpu_set_pta(VCPU *vcpu, u64 val)
-{
-    VCPU(vcpu,pta)=val;
-    return IA64_NO_FAULT;
-}
-
-static inline
-IA64FAULT
-vmx_vcpu_set_lid(VCPU *vcpu, u64 val)
-{
-    VCPU(vcpu,lid)=val;
-    return IA64_NO_FAULT;
-}
-extern IA64FAULT vmx_vcpu_set_tpr(VCPU *vcpu, u64 val);
-
-static inline
-IA64FAULT
-vmx_vcpu_set_eoi(VCPU *vcpu, u64 val)
-{
-    guest_write_eoi(vcpu);
-    return IA64_NO_FAULT;
-}
-
-static inline
-IA64FAULT
-vmx_vcpu_set_itv(VCPU *vcpu, u64 val)
-{
-
-    vtm_set_itv(vcpu, val);
-    return IA64_NO_FAULT;
-}
-static inline
-IA64FAULT
-vmx_vcpu_set_pmv(VCPU *vcpu, u64 val)
-{
-    VCPU(vcpu,pmv)=val;
-    return IA64_NO_FAULT;
-}
-static inline
-IA64FAULT
-vmx_vcpu_set_cmcv(VCPU *vcpu, u64 val)
-{
-    VCPU(vcpu,cmcv)=val;
-    return IA64_NO_FAULT;
-}
-static inline
-IA64FAULT
-vmx_vcpu_set_lrr0(VCPU *vcpu, u64 val)
-{
-    VCPU(vcpu,lrr0)=val;
-    return IA64_NO_FAULT;
-}
-static inline
-IA64FAULT
-vmx_vcpu_set_lrr1(VCPU *vcpu, u64 val)
-{
-    VCPU(vcpu,lrr1)=val;
-    return IA64_NO_FAULT;
-}
-
-
-
+static inline IA64FAULT vmx_vcpu_get_dcr(VCPU * vcpu, u64 * pval)
+{
+       *pval = VCPU(vcpu, dcr);
+       return IA64_NO_FAULT;
+}
+
+static inline IA64FAULT vmx_vcpu_get_itm(VCPU * vcpu, u64 * pval)
+{
+       *pval = VCPU(vcpu, itm);
+       return IA64_NO_FAULT;
+}
+
+static inline IA64FAULT vmx_vcpu_get_iva(VCPU * vcpu, u64 * pval)
+{
+       *pval = VCPU(vcpu, iva);
+       return IA64_NO_FAULT;
+}
+
+static inline IA64FAULT vmx_vcpu_get_pta(VCPU * vcpu, u64 * pval)
+{
+       *pval = VCPU(vcpu, pta);
+       return IA64_NO_FAULT;
+}
+
+static inline IA64FAULT vmx_vcpu_get_lid(VCPU * vcpu, u64 * pval)
+{
+       *pval = VCPU(vcpu, lid);
+       return IA64_NO_FAULT;
+}
+
+static inline IA64FAULT vmx_vcpu_get_ivr(VCPU * vcpu, u64 * pval)
+{
+       *pval = guest_read_vivr(vcpu);
+       return IA64_NO_FAULT;
+}
+
+static inline IA64FAULT vmx_vcpu_get_tpr(VCPU * vcpu, u64 * pval)
+{
+       *pval = VCPU(vcpu, tpr);
+       return IA64_NO_FAULT;
+}
+
+static inline IA64FAULT vmx_vcpu_get_eoi(VCPU * vcpu, u64 * pval)
+{
+       *pval = 0L;             // reads of eoi always return 0
+       return IA64_NO_FAULT;
+}
+
+static inline IA64FAULT vmx_vcpu_get_irr0(VCPU * vcpu, u64 * pval)
+{
+       *pval = VCPU(vcpu, irr[0]);
+       return IA64_NO_FAULT;
+}
+
+static inline IA64FAULT vmx_vcpu_get_irr1(VCPU * vcpu, u64 * pval)
+{
+       *pval = VCPU(vcpu, irr[1]);
+       return IA64_NO_FAULT;
+}
+
+static inline IA64FAULT vmx_vcpu_get_irr2(VCPU * vcpu, u64 * pval)
+{
+       *pval = VCPU(vcpu, irr[2]);
+       return IA64_NO_FAULT;
+}
+
+static inline IA64FAULT vmx_vcpu_get_irr3(VCPU * vcpu, u64 * pval)
+{
+       *pval = VCPU(vcpu, irr[3]);
+       return IA64_NO_FAULT;
+}
+
+static inline IA64FAULT vmx_vcpu_get_itv(VCPU * vcpu, u64 * pval)
+{
+       *pval = VCPU(vcpu, itv);
+       return IA64_NO_FAULT;
+}
+
+static inline IA64FAULT vmx_vcpu_get_pmv(VCPU * vcpu, u64 * pval)
+{
+       *pval = VCPU(vcpu, pmv);
+       return IA64_NO_FAULT;
+}
+
+static inline IA64FAULT vmx_vcpu_get_cmcv(VCPU * vcpu, u64 * pval)
+{
+       *pval = VCPU(vcpu, cmcv);
+       return IA64_NO_FAULT;
+}
+
+static inline IA64FAULT vmx_vcpu_get_lrr0(VCPU * vcpu, u64 * pval)
+{
+       *pval = VCPU(vcpu, lrr0);
+       return IA64_NO_FAULT;
+}
+
+static inline IA64FAULT vmx_vcpu_get_lrr1(VCPU * vcpu, u64 * pval)
+{
+       *pval = VCPU(vcpu, lrr1);
+       return IA64_NO_FAULT;
+}
+
+static inline IA64FAULT vmx_vcpu_set_dcr(VCPU * vcpu, u64 val)
+{
+       u64 mdcr, mask;
+       VCPU(vcpu, dcr) = val;
+       /* All vDCR bits will go to mDCR, except for be/pp/dm bits */
+       mdcr = ia64_get_dcr();
+       /* Machine dcr.dm masked to handle guest ld.s on tr mapped page */
+       mask = IA64_DCR_BE | IA64_DCR_PP | IA64_DCR_DM;
+       mdcr = (mdcr & mask) | (val & (~mask));
+       ia64_set_dcr(mdcr);
+       VMX(vcpu, mdcr) = mdcr;
+       return IA64_NO_FAULT;
+}
+
+static inline IA64FAULT vmx_vcpu_set_itm(VCPU * vcpu, u64 val)
+{
+       vtm_set_itm(vcpu, val);
+       return IA64_NO_FAULT;
+}
+
+static inline IA64FAULT vmx_vcpu_set_iva(VCPU * vcpu, u64 val)
+{
+       VCPU(vcpu, iva) = val;
+       return IA64_NO_FAULT;
+}
+
+static inline IA64FAULT vmx_vcpu_set_pta(VCPU * vcpu, u64 val)
+{
+       VCPU(vcpu, pta) = val;
+       return IA64_NO_FAULT;
+}
+
+static inline IA64FAULT vmx_vcpu_set_lid(VCPU * vcpu, u64 val)
+{
+       VCPU(vcpu, lid) = val;
+       return IA64_NO_FAULT;
+}
+extern IA64FAULT vmx_vcpu_set_tpr(VCPU * vcpu, u64 val);
+
+static inline IA64FAULT vmx_vcpu_set_eoi(VCPU * vcpu, u64 val)
+{
+       guest_write_eoi(vcpu);
+       return IA64_NO_FAULT;
+}
+
+static inline IA64FAULT vmx_vcpu_set_itv(VCPU * vcpu, u64 val)
+{
+
+       vtm_set_itv(vcpu, val);
+       return IA64_NO_FAULT;
+}
+
+static inline IA64FAULT vmx_vcpu_set_pmv(VCPU * vcpu, u64 val)
+{
+       VCPU(vcpu, pmv) = val;
+       return IA64_NO_FAULT;
+}
+
+static inline IA64FAULT vmx_vcpu_set_cmcv(VCPU * vcpu, u64 val)
+{
+       VCPU(vcpu, cmcv) = val;
+       return IA64_NO_FAULT;
+}
+
+static inline IA64FAULT vmx_vcpu_set_lrr0(VCPU * vcpu, u64 val)
+{
+       VCPU(vcpu, lrr0) = val;
+       return IA64_NO_FAULT;
+}
+
+static inline IA64FAULT vmx_vcpu_set_lrr1(VCPU * vcpu, u64 val)
+{
+       VCPU(vcpu, lrr1) = val;
+       return IA64_NO_FAULT;
+}
 
 /**************************************************************************
  VCPU privileged application register access routines
 **************************************************************************/
-static inline
-IA64FAULT vmx_vcpu_set_itc(VCPU *vcpu, UINT64 val)
-{
-    vtm_set_itc(vcpu, val);
-    return  IA64_NO_FAULT;
-}
-static inline
-IA64FAULT vmx_vcpu_get_itc(VCPU *vcpu,UINT64 *val)
-{
-    *val = vtm_get_itc(vcpu);
-    return  IA64_NO_FAULT;
-}
+static inline IA64FAULT vmx_vcpu_set_itc(VCPU * vcpu, u64 val)
+{
+       vtm_set_itc(vcpu, val);
+       return IA64_NO_FAULT;
+}
+
+static inline IA64FAULT vmx_vcpu_get_itc(VCPU * vcpu, u64 * val)
+{
+       *val = vtm_get_itc(vcpu);
+       return IA64_NO_FAULT;
+}
+
 /*
 static inline
-IA64FAULT vmx_vcpu_get_rr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
+IA64FAULT vmx_vcpu_get_rr(VCPU *vcpu, u64 reg, u64 *pval)
 {
     *pval = VMX(vcpu,vrr[reg>>61]);
-    return (IA64_NO_FAULT);
+    return IA64_NO_FAULT;
 }
  */
 /**************************************************************************
  VCPU debug breakpoint register access routines
 **************************************************************************/
 
-static inline
-IA64FAULT vmx_vcpu_get_cpuid(VCPU *vcpu, UINT64 reg, UINT64 *pval)
-{
-    // TODO: unimplemented DBRs return a reserved register fault
-    // TODO: Should set Logical CPU state, not just physical
-    if(reg > 4){
-        panic_domain(vcpu_regs(vcpu),"there are only five cpuid registers");
-    }
-    *pval=VCPU(vcpu,vcpuid[reg]);
-    return (IA64_NO_FAULT);
-}
-
-
-static inline
-IA64FAULT vmx_vcpu_set_dbr(VCPU *vcpu, UINT64 reg, UINT64 val)
-{
-    // TODO: unimplemented DBRs return a reserved register fault
-    // TODO: Should set Logical CPU state, not just physical
-    ia64_set_dbr(reg,val);
-    return (IA64_NO_FAULT);
-}
-static inline
-IA64FAULT vmx_vcpu_set_ibr(VCPU *vcpu, UINT64 reg, UINT64 val)
-{
-    // TODO: unimplemented IBRs return a reserved register fault
-    // TODO: Should set Logical CPU state, not just physical
-    ia64_set_ibr(reg,val);
-    return (IA64_NO_FAULT);
-}
-static inline
-IA64FAULT vmx_vcpu_get_dbr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
-{
-    // TODO: unimplemented DBRs return a reserved register fault
-    UINT64 val = ia64_get_dbr(reg);
-    *pval = val;
-    return (IA64_NO_FAULT);
-}
-static inline
-IA64FAULT vmx_vcpu_get_ibr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
-{
-    // TODO: unimplemented IBRs return a reserved register fault
-    UINT64 val = ia64_get_ibr(reg);
-    *pval = val;
-    return (IA64_NO_FAULT);
+static inline IA64FAULT vmx_vcpu_get_cpuid(VCPU * vcpu, u64 reg, u64 * pval)
+{
+       // TODO: unimplemented DBRs return a reserved register fault
+       // TODO: Should set Logical CPU state, not just physical
+       if (reg > 4) {
+               panic_domain(vcpu_regs(vcpu),
+                            "there are only five cpuid registers");
+       }
+       *pval = VCPU(vcpu, vcpuid[reg]);
+       return IA64_NO_FAULT;
+}
+
+static inline IA64FAULT vmx_vcpu_set_dbr(VCPU * vcpu, u64 reg, u64 val)
+{
+       // TODO: unimplemented DBRs return a reserved register fault
+       // TODO: Should set Logical CPU state, not just physical
+       ia64_set_dbr(reg, val);
+       return IA64_NO_FAULT;
+}
+
+static inline IA64FAULT vmx_vcpu_set_ibr(VCPU * vcpu, u64 reg, u64 val)
+{
+       // TODO: unimplemented IBRs return a reserved register fault
+       // TODO: Should set Logical CPU state, not just physical
+       ia64_set_ibr(reg, val);
+       return IA64_NO_FAULT;
+}
+
+static inline IA64FAULT vmx_vcpu_get_dbr(VCPU * vcpu, u64 reg, u64 * pval)
+{
+       // TODO: unimplemented DBRs return a reserved register fault
+       u64 val = ia64_get_dbr(reg);
+       *pval = val;
+       return IA64_NO_FAULT;
+}
+
+static inline IA64FAULT vmx_vcpu_get_ibr(VCPU * vcpu, u64 reg, u64 * pval)
+{
+       // TODO: unimplemented IBRs return a reserved register fault
+       u64 val = ia64_get_ibr(reg);
+       *pval = val;
+       return IA64_NO_FAULT;
 }
 
 /**************************************************************************
  VCPU performance monitor register access routines
 **************************************************************************/
-static inline
-IA64FAULT vmx_vcpu_set_pmc(VCPU *vcpu, UINT64 reg, UINT64 val)
-{
-    // TODO: Should set Logical CPU state, not just physical
-    // NOTE: Writes to unimplemented PMC registers are discarded
-    ia64_set_pmc(reg,val);
-    return (IA64_NO_FAULT);
-}
-static inline
-IA64FAULT vmx_vcpu_set_pmd(VCPU *vcpu, UINT64 reg, UINT64 val)
-{
-    // TODO: Should set Logical CPU state, not just physical
-    // NOTE: Writes to unimplemented PMD registers are discarded
-    ia64_set_pmd(reg,val);
-    return (IA64_NO_FAULT);
-}
-static inline
-IA64FAULT vmx_vcpu_get_pmc(VCPU *vcpu, UINT64 reg, UINT64 *pval)
-{
-    // NOTE: Reads from unimplemented PMC registers return zero
-    UINT64 val = (UINT64)ia64_get_pmc(reg);
-    *pval = val;
-    return (IA64_NO_FAULT);
-}
-static inline
-IA64FAULT vmx_vcpu_get_pmd(VCPU *vcpu, UINT64 reg, UINT64 *pval)
-{
-    // NOTE: Reads from unimplemented PMD registers return zero
-    UINT64 val = (UINT64)ia64_get_pmd(reg);
-    *pval = val;
-    return (IA64_NO_FAULT);
+static inline IA64FAULT vmx_vcpu_set_pmc(VCPU * vcpu, u64 reg, u64 val)
+{
+       // TODO: Should set Logical CPU state, not just physical
+       // NOTE: Writes to unimplemented PMC registers are discarded
+       ia64_set_pmc(reg, val);
+       return IA64_NO_FAULT;
+}
+
+static inline IA64FAULT vmx_vcpu_set_pmd(VCPU * vcpu, u64 reg, u64 val)
+{
+       // TODO: Should set Logical CPU state, not just physical
+       // NOTE: Writes to unimplemented PMD registers are discarded
+       ia64_set_pmd(reg, val);
+       return IA64_NO_FAULT;
+}
+
+static inline IA64FAULT vmx_vcpu_get_pmc(VCPU * vcpu, u64 reg, u64 * pval)
+{
+       // NOTE: Reads from unimplemented PMC registers return zero
+       u64 val = (u64) ia64_get_pmc(reg);
+       *pval = val;
+       return IA64_NO_FAULT;
+}
+
+static inline IA64FAULT vmx_vcpu_get_pmd(VCPU * vcpu, u64 reg, u64 * pval)
+{
+       // NOTE: Reads from unimplemented PMD registers return zero
+       u64 val = (u64) ia64_get_pmd(reg);
+       *pval = val;
+       return IA64_NO_FAULT;
 }
 
 /**************************************************************************
  VCPU banked general register access routines
 **************************************************************************/
 #if 0
-static inline
-IA64FAULT vmx_vcpu_bsw0(VCPU *vcpu)
-{
-
-    VCPU(vcpu,vpsr) &= ~IA64_PSR_BN;
-    return (IA64_NO_FAULT);
-}
-static inline
-IA64FAULT vmx_vcpu_bsw1(VCPU *vcpu)
-{
-
-    VCPU(vcpu,vpsr) |= IA64_PSR_BN;
-    return (IA64_NO_FAULT);
+static inline IA64FAULT vmx_vcpu_bsw0(VCPU * vcpu)
+{
+
+       VCPU(vcpu, vpsr) &= ~IA64_PSR_BN;
+       return IA64_NO_FAULT;
+}
+
+static inline IA64FAULT vmx_vcpu_bsw1(VCPU * vcpu)
+{
+
+       VCPU(vcpu, vpsr) |= IA64_PSR_BN;
+       return IA64_NO_FAULT;
 }
 #endif
 #if 0
 /* Another hash performance algorithm */
 #define redistribute_rid(rid)  (((rid) & ~0xffff) | (((rid) << 8) & 0xff00) | (((rid) >> 8) & 0xff))
 #endif
-static inline unsigned long
-vrrtomrr(VCPU *v, unsigned long val)
-{
-    ia64_rr rr;
-
-    rr.rrval=val;
-    rr.rid = rr.rid + v->arch.starting_rid;
-    if (rr.ps > PAGE_SHIFT)
-        rr.ps = PAGE_SHIFT;
-    rr.ve = 1;
-    return  vmMangleRID(rr.rrval);
+static inline unsigned long vrrtomrr(VCPU * v, unsigned long val)
+{
+       ia64_rr rr;
+
+       rr.rrval = val;
+       rr.rid = rr.rid + v->arch.starting_rid;
+       if (rr.ps > PAGE_SHIFT)
+               rr.ps = PAGE_SHIFT;
+       rr.ve = 1;
+       return vmMangleRID(rr.rrval);
 /* Disable this rid allocation algorithm for now */
 #if 0
-    rid=(((u64)vcpu->domain->domain_id)<<DOMAIN_RID_SHIFT) + rr.rid;
-    rr.rid = redistribute_rid(rid);
-#endif 
-
-}
-static inline thash_cb_t *
-vmx_vcpu_get_vtlb(VCPU *vcpu)
-{
-    return &vcpu->arch.vtlb;
-}
-
-static inline thash_cb_t *
-vcpu_get_vhpt(VCPU *vcpu)
-{
-    return &vcpu->arch.vhpt;
-}
-
-#endif
+       rid = (((u64) vcpu->domain->domain_id) << DOMAIN_RID_SHIFT) + rr.rid;
+       rr.rid = redistribute_rid(rid);
+#endif
+
+}
+static inline thash_cb_t *vmx_vcpu_get_vtlb(VCPU * vcpu)
+{
+       return &vcpu->arch.vtlb;
+}
+
+static inline thash_cb_t *vcpu_get_vhpt(VCPU * vcpu)
+{
+       return &vcpu->arch.vhpt;
+}
+
+#endif
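Aside: every accessor above follows the same calling convention, which the reindentation makes easier to see: the register value is returned through an out pointer while the IA64FAULT return value carries the status. A compilable sketch of that pattern with stand-in types and values (illustrative only; IA64FAULT and the fault codes are really defined in asm/ia64_int.h):

#include <stdio.h>

typedef unsigned long u64;
typedef int IA64FAULT;		/* stand-in; the real type lives in asm/ia64_int.h */
#define IA64_NO_FAULT 0		/* illustrative value */

/* Same shape as the vmx_vcpu_get_*() inlines: status out, value via pointer. */
static IA64FAULT demo_get_reg(u64 *pval)
{
	*pval = 0x7f05UL;	/* arbitrary sample value */
	return IA64_NO_FAULT;
}

int main(void)
{
	u64 val;

	if (demo_get_reg(&val) == IA64_NO_FAULT)
		printf("val = 0x%lx\n", val);
	return 0;
}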

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog