
[Xen-changelog] [IA64] Merge guest TR emulation



# HG changeset patch
# User awilliam@xxxxxxxxxxx
# Node ID 1abf3783975d8c120a0a6918133fa37330c0d2a7
# Parent  551f7935f79a842341ec5baf5fccb9c56bd5473b
[IA64] Merge guest TR emulation

This patch merges the guest TR emulation used by VTI domains and
para-virtualized domains into a single implementation.

Signed-off-by: Anthony Xu <anthony.xu@xxxxxxxxx>
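
The merge replaces the old per-thash_cb_t TR arrays (ITR()/DTR() plus the
tlb_special_t indirection) with TR arrays kept directly in vcpu->arch
(itrs[]/dtrs[]) and per-region bitmasks (itr_regions/dtr_regions) that let
lookups skip regions holding no TR at all.  Below is a minimal,
self-contained sketch of that bookkeeping; the toy_* names are invented
for illustration, and only the slot-array/region-mask shape mirrors the
patch:

    #include <stdint.h>

    #define TOY_NITRS 8
    struct toy_tr { uint64_t vadr, ps, rid; int p; };

    struct toy_vcpu {
        struct toy_tr itrs[TOY_NITRS];
        uint64_t itr_regions;   /* bit r set => region r may hold a TR */
    };

    /* the region number is va>>61: IA-64 splits the VA space into 8
     * regions, so one byte of mask covers every possible TR placement */
    #define region_set(m, va)   ((m) |= 1UL << ((uint64_t)(va) >> 61))
    #define region_check(m, va) ((m) &  (1UL << ((uint64_t)(va) >> 61)))

    /* slot-returning overlap scan, shaped like vtr_find_overlap() below */
    static int toy_find_overlap(struct toy_vcpu *v, uint64_t rid,
                                uint64_t va, uint64_t ps)
    {
        uint64_t end = va + (1UL << ps) - 1;
        int i;

        if (!region_check(v->itr_regions, va))
            return -1;                     /* fast path: region is empty */
        for (i = 0; i < TOY_NITRS; i++) {
            struct toy_tr *trp = &v->itrs[i];
            uint64_t ea = trp->vadr + (1UL << trp->ps) - 1;
            if (trp->p && trp->rid == rid && va <= ea && trp->vadr <= end)
                return i;                  /* a slot index, not a pointer */
        }
        return -1;
    }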

diff -r 551f7935f79a -r 1abf3783975d xen/arch/ia64/vmx/vmmu.c
--- a/xen/arch/ia64/vmx/vmmu.c  Fri Mar 10 15:25:54 2006
+++ b/xen/arch/ia64/vmx/vmmu.c  Fri Mar 10 15:52:12 2006
@@ -31,6 +31,7 @@
 #include <asm/hw_irq.h>
 #include <asm/vmx_pal_vsa.h>
 #include <asm/kregs.h>
+#include <asm/vcpu.h>
 #include <xen/irq.h>
 
 /*
@@ -68,14 +69,14 @@
 /*
  * The VRN bits of va stand for which rr to get.
  */
-ia64_rr vmmu_get_rr(VCPU *vcpu, u64 va)
-{
-    ia64_rr   vrr;
-    vmx_vcpu_get_rr(vcpu, va, &vrr.rrval);
-    return vrr;
-}
-
-
+//ia64_rr vmmu_get_rr(struct vcpu *vcpu, u64 va)
+//{
+//    ia64_rr   vrr;
+//    vcpu_get_rr(vcpu, va, &vrr.rrval);
+//    return vrr;
+//}
+
+/*
 void recycle_message(thash_cb_t *hcb, u64 para)
 {
     if(hcb->ht == THASH_VHPT)
@@ -84,7 +85,7 @@
     }
     printk("hcb=%p recycled with %lx\n",hcb,para);
 }
-
+ */
 
 /*
  * Purge all guest TCs in logical processor.
@@ -102,7 +103,6 @@
     u32 stride1,stride2;
     u32 i,j;
     u64 psr;
-    
 
     result = ia64_pal_call_static(PAL_PTCE_INFO,0,0,0, 0);
     if ( result.status != 0 ) {
@@ -113,7 +113,7 @@
     count2 = LOW_32BITS (result.v1);
     stride1 = HIGH_32BITS(result.v2);
     stride2 = LOW_32BITS (result.v2);
-    
+
     local_irq_save(psr);
     for (i=0; i<count1; i++) {
         for (j=0; j<count2; j++) {
@@ -133,24 +133,10 @@
 //    struct page_info *page;
     thash_cb_t  *vhpt;
     PTA pta_value;
-/*
-    page = alloc_domheap_pages (NULL, VCPU_VHPT_ORDER, 0);
-    if ( page == NULL ) {
-        panic("No enough contiguous memory for init_domain_mm\n");
-    }
-    vbase = page_to_virt(page);
-    printk("Allocate domain vhpt at 0x%lx\n", (u64)vbase);
-    memset(vbase, 0, VCPU_VHPT_SIZE);
- */
-//    vcur = (void*)((u64)vbase + VCPU_VHPT_SIZE);
     vcur -= sizeof (thash_cb_t);
     vhpt = vcur;
     vhpt->ht = THASH_VHPT;
     vhpt->vcpu = d;
-//    vhpt->hash_func = machine_thash;
-//    vcur -= sizeof (vhpt_special);
-//    vs = vcur;
-
     /* Setup guest pta */
     pta_value.val = 0;
     pta_value.ve = 1;
@@ -159,14 +145,10 @@
     pta_value.base = ((u64)vbase) >> PTA_BASE_SHIFT;
     d->arch.arch_vmx.mpta = pta_value.val;
 
-//    vhpt->vs = vs;
-//    vhpt->vs->get_mfn = __gpfn_to_mfn_foreign;
-//    vhpt->vs->tag_func = machine_ttag;
     vhpt->hash = vbase;
     vhpt->hash_sz = VCPU_VHPT_SIZE/2;
     vhpt->cch_buf = (void *)(vbase + vhpt->hash_sz);
     vhpt->cch_sz = (u64)vcur - (u64)vhpt->cch_buf;
-//    vhpt->recycle_notifier = recycle_message;
     thash_init(vhpt,VCPU_VHPT_SHIFT-1);
     return vhpt;
 }
@@ -177,9 +159,8 @@
 {
     struct page_info *page;
     void    *vbase, *vhptbase, *vcur;
-    tlb_special_t  *ts;
     thash_cb_t  *tlb;
-    
+
     page = alloc_domheap_pages (NULL, VCPU_VHPT_ORDER, 0);
     if ( page == NULL ) {
         panic("No enough contiguous memory for init_domain_mm\n");
@@ -193,10 +174,7 @@
     tlb = vcur;
     tlb->ht = THASH_TLB;
     tlb->vcpu = d;
-    vcur -= sizeof (tlb_special_t);
-    ts = vcur;
-    tlb->ts = ts;
-    tlb->ts->vhpt = init_domain_vhpt(d,vhptbase,vbase);
+    tlb->vhpt = init_domain_vhpt(d,vhptbase,vbase);
 //    tlb->hash_func = machine_thash;
     tlb->hash = vbase;
     tlb->hash_sz = VCPU_VTLB_SIZE/2;
@@ -207,27 +185,6 @@
     return tlb;
 }
 
-/* Allocate physical to machine mapping table for domN
- * FIXME: Later this interface may be removed, if that table is provided
- * by control panel. Dom0 has gpfn identical to mfn, which doesn't need
- * this interface at all.
- */
-#if 0
-void
-alloc_pmt(struct domain *d)
-{
-    struct page_info *page;
-
-    /* Only called once */
-    ASSERT(d->arch.pmt);
-
-    page = alloc_domheap_pages(NULL, get_order(d->max_pages), 0);
-    ASSERT(page);
-
-    d->arch.pmt = page_to_virt(page);
-    memset(d->arch.pmt, 0x55, d->max_pages * 8);
-}
-#endif
 /*
  * Insert guest TLB to machine TLB.
  *  data:   In TLB format
@@ -240,7 +197,6 @@
     unsigned long mtlb_ppn;
     mtlb.ifa = tlb->vadr;
     mtlb.itir = tlb->itir & ~ITIR_RV_MASK;
-    //vmx_vcpu_get_rr(d, mtlb.ifa, &vrr.value);
     mtlb.page_flags = tlb->page_flags & ~PAGE_FLAGS_RV_MASK;
     mtlb.ppn = get_mfn(d->domain,tlb->ppn);
     mtlb_ppn=mtlb.ppn;
@@ -311,7 +267,7 @@
     IA64_PSR  vpsr; 
 
     vpsr.val = vmx_vcpu_get_psr(vcpu);
-    vrr = vmx_vcpu_rr(vcpu, vadr);
+    vcpu_get_rr(vcpu, vadr, &vrr.rrval);
     vmx_vcpu_get_pta(vcpu,&vpta.val);
 
     if ( vrr.ve & vpta.ve ) {
@@ -355,21 +311,18 @@
     u64     *vpa;
     thash_data_t    *tlb;
     thash_cb_t *hcb;
-    ia64_rr vrr;
     u64     mfn;
 
     if ( !(VCPU(vcpu, vpsr) & IA64_PSR_IT) ) {   // I-side physical mode
         gpip = gip;
     }
     else {
-        vmx_vcpu_get_rr(vcpu, gip, &vrr.rrval);
-       hcb = vmx_vcpu_get_vtlb(vcpu);
-        tlb = vtlb_lookup_ex (hcb, vrr.rid, gip, ISIDE_TLB );
-        if( tlb == NULL )
-             tlb = vtlb_lookup_ex (hcb,
-                vrr.rid, gip, DSIDE_TLB );
-        if (tlb) 
-               gpip = (tlb->ppn << 12) | ( gip & (PSIZE(tlb->ps)-1) );
+           hcb = vmx_vcpu_get_vtlb(vcpu);
+        tlb = vtlb_lookup(hcb, gip, ISIDE_TLB);
+//        if( tlb == NULL )
+//             tlb = vtlb_lookup(hcb, gip, DSIDE_TLB );
+        if (tlb)
+           gpip = (tlb->ppn >>(tlb->ps-12)<<tlb->ps) | ( gip & (PSIZE(tlb->ps)-1) );
     }
     if( gpip){
         mfn = gmfn_to_mfn(vcpu->domain, gpip >>PAGE_SHIFT);
@@ -388,236 +341,146 @@
 
 IA64FAULT vmx_vcpu_itc_i(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa)
 {
-
-    thash_data_t data, *ovl;
-    thash_cb_t  *hcb;
-    search_section_t sections;
-    ia64_rr vrr;
-
-    hcb = vmx_vcpu_get_vtlb(vcpu);
-    data.page_flags=pte & ~PAGE_FLAGS_RV_MASK;
-    data.itir=itir;
-    data.vadr=PAGEALIGN(ifa,data.ps);
-    data.tc = 1;
-    data.cl=ISIDE_TLB;
-    vmx_vcpu_get_rr(vcpu, ifa, (UINT64 *)&vrr);
-    data.rid = vrr.rid;
-    
-    sections.tr = 1;
-    sections.tc = 0;
-
-    ovl = vtr_find_overlap(hcb, &data, ISIDE_TLB);
-    while (ovl) {
+    int slot;
+    u64 ps, va;
+    thash_cb_t  *hcb;
+
+    ps = itir_ps(itir);
+    va = PAGEALIGN(ifa, ps);
+    slot = vtr_find_overlap(vcpu, va, ps, ISIDE_TLB);
+    if (slot >=0) {
         // generate MCA.
         panic("Tlb conflict!!");
         return IA64_FAULT;
     }
-    thash_purge_and_insert(hcb, &data, ifa);
-    return IA64_NO_FAULT;
-}
-
-
-
+    hcb = vmx_vcpu_get_vtlb(vcpu);
+    thash_purge_and_insert(hcb, pte, itir, ifa);
+    return IA64_NO_FAULT;
+}
 
 IA64FAULT vmx_vcpu_itc_d(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa)
 {
-
-    thash_data_t data, *ovl;
-    thash_cb_t  *hcb;
-    search_section_t sections;
-    ia64_rr vrr;
-
-    hcb = vmx_vcpu_get_vtlb(vcpu);
-    data.page_flags=pte & ~PAGE_FLAGS_RV_MASK;
-    data.itir=itir;
-    data.vadr=PAGEALIGN(ifa,data.ps);
-    data.tc = 1;
-    data.cl=DSIDE_TLB;
-    vmx_vcpu_get_rr(vcpu, ifa,(UINT64 *)&vrr);
-    data.rid = vrr.rid;
-    sections.tr = 1;
-    sections.tc = 0;
-
-    ovl = vtr_find_overlap(hcb, &data, DSIDE_TLB);
-    if (ovl) {
-          // generate MCA.
-        panic("Tlb conflict!!");
-        return IA64_FAULT;
-    }
-    thash_purge_and_insert(hcb, &data, ifa);
-    return IA64_NO_FAULT;
-}
-
-/*
- * Return TRUE/FALSE for success of lock operation
- */
-
-/*
-int vmx_lock_guest_dtc (VCPU *vcpu, UINT64 va, int lock)
-{
-
-    thash_cb_t  *hcb;
-    ia64_rr vrr;
-    u64          preferred_size;
-
-    vmx_vcpu_get_rr(vcpu, va, &vrr);
-    hcb = vmx_vcpu_get_vtlb(vcpu);
-    va = PAGEALIGN(va,vrr.ps);
-    preferred_size = PSIZE(vrr.ps);
-    return thash_lock_tc(hcb, va, preferred_size, vrr.rid, DSIDE_TLB, lock);
-}
- */
-
-
-
-IA64FAULT vmx_vcpu_itr_i(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa, UINT64 idx)
-{
-
-    thash_data_t data, *ovl;
-    thash_cb_t  *hcb;
-    search_section_t sections;
-    ia64_rr vrr;
-    /* u64 mfn,psr; */
-
-    hcb = vmx_vcpu_get_vtlb(vcpu);
-    data.page_flags=pte & ~PAGE_FLAGS_RV_MASK;
-    data.itir=itir;
-    data.vadr=PAGEALIGN(ifa,data.ps);
-    data.tc = 0;
-    data.cl=ISIDE_TLB;
-    vmx_vcpu_get_rr(vcpu, ifa, (UINT64 *)&vrr);
-    data.rid = vrr.rid;
-    sections.tr = 1;
-    sections.tc = 0;
-
-
-    ovl = vtr_find_overlap(hcb, &data, ISIDE_TLB);
-    if (ovl) {
+    int slot;
+    u64 ps, va, gpfn;
+    thash_cb_t  *hcb;
+
+    ps = itir_ps(itir);
+    va = PAGEALIGN(ifa, ps);
+    slot = vtr_find_overlap(vcpu, va, ps, DSIDE_TLB);
+    if (slot >=0) {
         // generate MCA.
         panic("Tlb conflict!!");
         return IA64_FAULT;
     }
-    sections.tr = 0;
-    sections.tc = 1;
-    thash_purge_entries(hcb, &data, sections);
-/*    if((idx==IA64_TR_KERNEL)&&(data.ps == KERNEL_TR_PAGE_SHIFT)){
-        data.contiguous=1;
-    }
- */
-    thash_tr_insert(hcb, &data, ifa, idx);
-/*
-    if((idx==IA64_TR_KERNEL)&&(data.ps == KERNEL_TR_PAGE_SHIFT)){
-        mfn = __gpfn_to_mfn_foreign(vcpu->domain,arch_to_xen_ppn(data.ppn));
-        data.page_flags=pte&~PAGE_FLAGS_RV_MASK;
-        data.ppn = xen_to_arch_ppn(mfn);
-        psr = ia64_clear_ic();
-        ia64_itr(0x1, IA64_ITR_GUEST_KERNEL, data.vadr, data.page_flags, data.ps);
-        ia64_set_psr(psr);      // restore psr
-        ia64_srlz_i();
-//        return IA64_NO_FAULT;
-    }
-*/
-    return IA64_NO_FAULT;
-}
-
-IA64FAULT vmx_vcpu_itr_d(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa, UINT64 idx)
-{
-
-    thash_data_t data, *ovl;
-    thash_cb_t  *hcb;
-    search_section_t sections;
-    ia64_rr    vrr;
-    /* u64 mfn,psr; */
-
-    hcb = vmx_vcpu_get_vtlb(vcpu);
-    data.page_flags=pte & ~PAGE_FLAGS_RV_MASK;
-    data.itir=itir;
-    data.vadr=PAGEALIGN(ifa,data.ps);
-    data.tc = 0;
-    data.cl=DSIDE_TLB;
-    vmx_vcpu_get_rr(vcpu, ifa,(UINT64 *)&vrr);
-    data.rid = vrr.rid;
-    sections.tr = 1;
-    sections.tc = 0;
-
-    ovl = vtr_find_overlap(hcb, &data, DSIDE_TLB);
-    while (ovl) {
+    hcb = vmx_vcpu_get_vtlb(vcpu);
+    gpfn = (pte & _PAGE_PPN_MASK)>> PAGE_SHIFT;
+    if(__gpfn_is_io(vcpu->domain,gpfn))
+        pte |= VTLB_PTE_IO;
+    thash_purge_and_insert(hcb, pte, itir, ifa);
+    return IA64_NO_FAULT;
+
+}
+
+
+
+
+IA64FAULT vmx_vcpu_itr_i(VCPU *vcpu, u64 slot, u64 pte, u64 itir, u64 ifa)
+{
+    int index;
+    u64 ps, va, rid;
+    thash_cb_t  *hcb;
+
+    ps = itir_ps(itir);
+    va = PAGEALIGN(ifa, ps);
+    index = vtr_find_overlap(vcpu, va, ps, ISIDE_TLB);
+    if (index >=0) {
         // generate MCA.
         panic("Tlb conflict!!");
         return IA64_FAULT;
     }
-    sections.tr = 0;
-    sections.tc = 1;
-    thash_purge_entries(hcb, &data, sections);
-/*
-    if((idx==IA64_TR_KERNEL)&&(data.ps == KERNEL_TR_PAGE_SHIFT)){
-        data.contiguous=1;
-    }
- */
-    thash_tr_insert(hcb, &data, ifa, idx);
-/*
-    if((idx==IA64_TR_KERNEL)&&(data.ps == KERNEL_TR_PAGE_SHIFT)){
-        mfn = __gpfn_to_mfn_foreign(vcpu->domain,arch_to_xen_ppn(data.ppn));
-        data.page_flags=pte&~PAGE_FLAGS_RV_MASK;
-        data.ppn = xen_to_arch_ppn(mfn);
-        psr = ia64_clear_ic();
-        ia64_itr(0x2,IA64_DTR_GUEST_KERNEL , data.vadr, data.page_flags, data.ps);
-        ia64_set_psr(psr);      // restore psr
-        ia64_srlz_i();
-//        return IA64_NO_FAULT;
-    }
-*/
-
-    return IA64_NO_FAULT;
-}
-
-
-
-IA64FAULT vmx_vcpu_ptr_d(VCPU *vcpu,UINT64 vadr,UINT64 ps)
-{
-    thash_cb_t  *hcb;
-    ia64_rr rr;
-    search_section_t sections;
-
-    hcb = vmx_vcpu_get_vtlb(vcpu);
-    rr=vmx_vcpu_rr(vcpu,vadr);
-    sections.tr = 1;
-    sections.tc = 1;
-    thash_purge_entries_ex(hcb,rr.rid,vadr,ps,sections,DSIDE_TLB);
-    return IA64_NO_FAULT;
-}
-
-IA64FAULT vmx_vcpu_ptr_i(VCPU *vcpu,UINT64 vadr,UINT64 ps)
-{
-    thash_cb_t  *hcb;
-    ia64_rr rr;
-    search_section_t sections;
-    hcb = vmx_vcpu_get_vtlb(vcpu);
-    rr=vmx_vcpu_rr(vcpu,vadr);
-    sections.tr = 1;
-    sections.tc = 1;
-    thash_purge_entries_ex(hcb,rr.rid,vadr,ps,sections,ISIDE_TLB);
-    return IA64_NO_FAULT;
-}
-
-IA64FAULT vmx_vcpu_ptc_l(VCPU *vcpu, UINT64 vadr, UINT64 ps)
-{
-    thash_cb_t  *hcb;
-    ia64_rr vrr;
-    search_section_t sections;
-    hcb = vmx_vcpu_get_vtlb(vcpu);
-    vrr=vmx_vcpu_rr(vcpu,vadr);
-    sections.tr = 0;
-    sections.tc = 1;
-    vadr = PAGEALIGN(vadr, ps);
-
-    thash_purge_entries_ex(hcb,vrr.rid,vadr,ps,sections,DSIDE_TLB);
-    thash_purge_entries_ex(hcb,vrr.rid,vadr,ps,sections,ISIDE_TLB);
-    return IA64_NO_FAULT;
-}
-
-
-IA64FAULT vmx_vcpu_ptc_e(VCPU *vcpu, UINT64 vadr)
+    hcb = vmx_vcpu_get_vtlb(vcpu);
+    thash_purge_entries(hcb, va, ps);
+    vcpu_get_rr(vcpu, va, &rid);
+    rid = rid& RR_RID_MASK;
+    vmx_vcpu_set_tr((thash_data_t *)&vcpu->arch.itrs[slot], pte, itir, va, rid);
+    vcpu_quick_region_set(PSCBX(vcpu,itr_regions),va);
+    return IA64_NO_FAULT;
+}
+
+
+IA64FAULT vmx_vcpu_itr_d(VCPU *vcpu, u64 slot, u64 pte, u64 itir, u64 ifa)
+{
+    int index;
+    u64 ps, va, gpfn, rid;
+    thash_cb_t  *hcb;
+
+    ps = itir_ps(itir);
+    va = PAGEALIGN(ifa, ps);
+    index = vtr_find_overlap(vcpu, va, ps, DSIDE_TLB);
+    if (index>=0) {
+        // generate MCA.
+        panic("Tlb conflict!!");
+        return IA64_FAULT;
+    }
+    hcb = vmx_vcpu_get_vtlb(vcpu);
+    thash_purge_entries(hcb, va, ps);
+    gpfn = (pte & _PAGE_PPN_MASK)>> PAGE_SHIFT;
+    if(__gpfn_is_io(vcpu->domain,gpfn))
+        pte |= VTLB_PTE_IO;
+    vcpu_get_rr(vcpu, va, &rid);
+    rid = rid& RR_RID_MASK;
+    vmx_vcpu_set_tr((thash_data_t *)&vcpu->arch.dtrs[slot], pte, itir, va, rid);
+    vcpu_quick_region_set(PSCBX(vcpu,dtr_regions),va);
+    return IA64_NO_FAULT;
+}
+
+
+
+IA64FAULT vmx_vcpu_ptr_d(VCPU *vcpu,UINT64 ifa,UINT64 ps)
+{
+    int index;
+    u64 va;
+    thash_cb_t  *hcb;
+
+    va = PAGEALIGN(ifa, ps);
+    index = vtr_find_overlap(vcpu, va, ps, DSIDE_TLB);
+    if (index>=0) {
+        vcpu->arch.dtrs[index].p=0;
+        index = vtr_find_overlap(vcpu, va, ps, DSIDE_TLB);
+    }
+    hcb = vmx_vcpu_get_vtlb(vcpu);
+    thash_purge_entries(hcb, va, ps);
+    return IA64_NO_FAULT;
+}
+
+IA64FAULT vmx_vcpu_ptr_i(VCPU *vcpu,UINT64 ifa,UINT64 ps)
+{
+    int index;
+    u64 va;
+    thash_cb_t  *hcb;
+
+    va = PAGEALIGN(ifa, ps);
+    index = vtr_find_overlap(vcpu, va, ps, ISIDE_TLB);
+    if (index>=0) {
+        vcpu->arch.itrs[index].p=0;
+        index = vtr_find_overlap(vcpu, va, ps, ISIDE_TLB);
+    }
+    hcb = vmx_vcpu_get_vtlb(vcpu);
+    thash_purge_entries(hcb, va, ps);
+    return IA64_NO_FAULT;
+}
+
+IA64FAULT vmx_vcpu_ptc_l(VCPU *vcpu, UINT64 va, UINT64 ps)
+{
+    thash_cb_t  *hcb;
+    va = PAGEALIGN(va, ps);
+    hcb = vmx_vcpu_get_vtlb(vcpu);
+    thash_purge_entries(hcb, va, ps);
+    return IA64_NO_FAULT;
+}
+
+
+IA64FAULT vmx_vcpu_ptc_e(VCPU *vcpu, UINT64 va)
 {
     thash_cb_t  *hcb;
     hcb = vmx_vcpu_get_vtlb(vcpu);
@@ -625,15 +488,15 @@
     return IA64_NO_FAULT;
 }
 
-IA64FAULT vmx_vcpu_ptc_g(VCPU *vcpu, UINT64 vadr, UINT64 ps)
-{
-    vmx_vcpu_ptc_l(vcpu, vadr, ps);
+IA64FAULT vmx_vcpu_ptc_g(VCPU *vcpu, UINT64 va, UINT64 ps)
+{
+    vmx_vcpu_ptc_l(vcpu, va, ps);
     return IA64_ILLOP_FAULT;
 }
 
-IA64FAULT vmx_vcpu_ptc_ga(VCPU *vcpu,UINT64 vadr,UINT64 ps)
-{
-    vmx_vcpu_ptc_l(vcpu, vadr, ps);
+IA64FAULT vmx_vcpu_ptc_ga(VCPU *vcpu,UINT64 va,UINT64 ps)
+{
+    vmx_vcpu_ptc_l(vcpu, va, ps);
     return IA64_NO_FAULT;
 }
 
@@ -644,7 +507,7 @@
     ia64_rr vrr;
     u64 vhpt_offset;
     vmx_vcpu_get_pta(vcpu, &vpta.val);
-    vrr=vmx_vcpu_rr(vcpu, vadr);
+    vcpu_get_rr(vcpu, vadr, &vrr.rrval);
     if(vpta.vf){
         panic("THASH,Don't support long format VHPT");
         *pval = ia64_call_vsa(PAL_VPS_THASH,vadr,vrr.rrval,vpta.val,0,0,0,0);
@@ -663,7 +526,7 @@
     ia64_rr vrr;
     PTA vpta;
     vmx_vcpu_get_pta(vcpu, &vpta.val);
-    vrr=vmx_vcpu_rr(vcpu, vadr);
+    vcpu_get_rr(vcpu, vadr, &vrr.rrval);
     if(vpta.vf){
         panic("THASH,Don't support long format VHPT");
         *pval = ia64_call_vsa(PAL_VPS_TTAG,vadr,vrr.rrval,0,0,0,0,0);
@@ -679,13 +542,11 @@
 {
     thash_data_t *data;
     thash_cb_t  *hcb;
-    ia64_rr vrr;
     ISR visr,pt_isr;
     REGS *regs;
     u64 vhpt_adr;
     IA64_PSR vpsr;
     hcb = vmx_vcpu_get_vtlb(vcpu);
-    vrr=vmx_vcpu_rr(vcpu,vadr);
     regs=vcpu_regs(vcpu);
     pt_isr.val=VMX(vcpu,cr_isr);
     visr.val=0;
@@ -696,7 +557,7 @@
          visr.ni=1;
     }
     visr.na=1;
-    data = vtlb_lookup_ex(hcb, vrr.rid, vadr, DSIDE_TLB);
+    data = vtlb_lookup(hcb, vadr, DSIDE_TLB);
     if(data){
         if(data->p==0){
             visr.na=1;
@@ -744,8 +605,7 @@
         }
         else{
             vmx_vcpu_thash(vcpu, vadr, &vhpt_adr);
-            vrr=vmx_vcpu_rr(vcpu,vhpt_adr);
-            data = vtlb_lookup_ex(hcb, vrr.rid, vhpt_adr, DSIDE_TLB);
+            data = vtlb_lookup(hcb, vhpt_adr, DSIDE_TLB);
             if(data){
                 if(vpsr.ic){
                     vcpu_set_isr(vcpu, visr.val);
@@ -776,7 +636,6 @@
 {
     thash_data_t *data;
     thash_cb_t  *hcb;
-    ia64_rr rr;
     PTA vpta;
     vmx_vcpu_get_pta(vcpu, &vpta.val);
     if(vpta.vf==0 || unimplemented_gva(vcpu, vadr)){
@@ -784,8 +643,7 @@
         return IA64_NO_FAULT;
     }
     hcb = vmx_vcpu_get_vtlb(vcpu);
-    rr=vmx_vcpu_rr(vcpu,vadr);
-    data = vtlb_lookup_ex(hcb, rr.rid, vadr, DSIDE_TLB);
+    data = vtlb_lookup(hcb, vadr, DSIDE_TLB);
     if(!data||!data->p){
         *key=1;
     }else{
@@ -821,11 +679,9 @@
     unsigned long      end;    /* end of the area mapped by current entry */
     thash_data_t       *entry;
     struct vcpu *v = current;
-    ia64_rr    vrr;
 
     vtlb = vmx_vcpu_get_vtlb(v); 
-    vrr = vmx_vcpu_rr(v, va);
-    entry = vtlb_lookup_ex(vtlb, vrr.rid, va, DSIDE_TLB);
+    entry = vtlb_lookup(vtlb, va, DSIDE_TLB);
     if (entry == NULL)
        return -EFAULT;
 
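The itc/itr emulation entry points above no longer assemble a
thash_data_t on the stack; they hand the raw (pte, itir, ifa) triple down
and derive the page size and aligned address locally.  The shared shape
of the four handlers is roughly the following (a condensed restatement,
with the TR-slot bookkeeping of the itr variants elided):

    /* restatement of the common handler shape, not literal patch code */
    ps = itir_ps(itir);                  /* page-size field of itir */
    va = PAGEALIGN(ifa, ps);
    if (vtr_find_overlap(vcpu, va, ps, side) >= 0)
        panic("Tlb conflict!!");         /* a guest TR already maps va */
    hcb = vmx_vcpu_get_vtlb(vcpu);
    thash_purge_and_insert(hcb, pte, itir, ifa);
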
diff -r 551f7935f79a -r 1abf3783975d xen/arch/ia64/vmx/vmx_hypercall.c
--- a/xen/arch/ia64/vmx/vmx_hypercall.c Fri Mar 10 15:25:54 2006
+++ b/xen/arch/ia64/vmx/vmx_hypercall.c Fri Mar 10 15:52:12 2006
@@ -36,7 +36,7 @@
 #include <xen/domain.h>
 
 extern long do_sched_op(int cmd, unsigned long arg);
-
+extern unsigned long domain_mpa_to_imva(struct domain *,unsigned long mpaddr);
 
 void hyper_not_support(void)
 {
@@ -126,7 +126,7 @@
     vcpu_set_gr(vcpu, 8, ret, 0);
     vmx_vcpu_increment_iip(vcpu);
 }
-
+/*
 static int do_lock_page(VCPU *vcpu, u64 va, u64 lock)
 {
     ia64_rr rr;
@@ -135,7 +135,7 @@
     rr = vmx_vcpu_rr(vcpu, va);
     return thash_lock_tc(hcb, va ,1U<<rr.ps, rr.rid, DSIDE_TLB, lock);
 }
-
+ */
 /*
  * Lock guest page in vTLB, so that it's not relinquished by recycle
  * session when HV is servicing that hypercall.
diff -r 551f7935f79a -r 1abf3783975d xen/arch/ia64/vmx/vmx_init.c
--- a/xen/arch/ia64/vmx/vmx_init.c      Fri Mar 10 15:25:54 2006
+++ b/xen/arch/ia64/vmx/vmx_init.c      Fri Mar 10 15:52:12 2006
@@ -96,7 +96,7 @@
        if (!(vp_env_info & VP_OPCODE))
                printk("WARNING: no opcode provided from hardware(%lx)!!!\n", 
vp_env_info);
        vm_order = get_order(buffer_size);
-       printk("vm buffer size: %ld, order: %ld\n", buffer_size, vm_order);
+       printk("vm buffer size: %ld, order: %d\n", buffer_size, vm_order);
 
        vmx_enabled = 1;
 no_vti:
@@ -161,7 +161,7 @@
                return NULL;
        }
 
-       printk("vpd base: 0x%lx, vpd size:%d\n", vpd, sizeof(vpd_t));
+       printk("vpd base: 0x%lp, vpd size:%ld\n", vpd, sizeof(vpd_t));
        memset(vpd, 0, VPD_SIZE);
        /* CPUID init */
        for (i = 0; i < 5; i++)
@@ -234,7 +234,7 @@
 {
        u64 status;
 
-       status = ia64_pal_vp_restore(v->arch.privregs, 0);
+       status = ia64_pal_vp_restore((u64)v->arch.privregs, 0);
        if (status != PAL_STATUS_SUCCESS)
                panic("Restore vp status failed\n");
 
@@ -307,7 +307,6 @@
 
 int vmx_alloc_contig_pages(struct domain *d)
 {
-       unsigned int order;
        unsigned long i, j, start,tmp, end, pgnr, conf_nr;
        struct page_info *page;
        struct vcpu *v = d->vcpu[0];
diff -r 551f7935f79a -r 1abf3783975d xen/arch/ia64/vmx/vmx_irq_ia64.c
--- a/xen/arch/ia64/vmx/vmx_irq_ia64.c  Fri Mar 10 15:25:54 2006
+++ b/xen/arch/ia64/vmx/vmx_irq_ia64.c  Fri Mar 10 15:52:12 2006
@@ -128,6 +128,6 @@
         * come through until ia64_eoi() has been done.
         */
        vmx_irq_exit();
-       if (current && wake_dom0 != dom0 ) 
+       if (wake_dom0 && current->domain != dom0 ) 
                vcpu_wake(dom0->vcpu[0]);
 }
diff -r 551f7935f79a -r 1abf3783975d xen/arch/ia64/vmx/vmx_phy_mode.c
--- a/xen/arch/ia64/vmx/vmx_phy_mode.c  Fri Mar 10 15:25:54 2006
+++ b/xen/arch/ia64/vmx/vmx_phy_mode.c  Fri Mar 10 15:52:12 2006
@@ -218,7 +218,7 @@
     extern void * pal_vaddr;
    vmx_switch_rr7(vmx_vrrtomrr(vcpu,VMX(vcpu, vrr[VRN7])),(void *)vcpu->domain->shared_info,
                 (void *)vcpu->arch.privregs,
-                ( void *)vcpu->arch.vtlb->ts->vhpt->hash, pal_vaddr );
+                (void *)vcpu->arch.vtlb->vhpt->hash, pal_vaddr );
     ia64_set_pta(vcpu->arch.arch_vmx.mpta);
 
        ia64_srlz_d();
@@ -260,10 +260,10 @@
 
     psr=ia64_clear_ic();
 
-    mrr=vmx_vcpu_rr(vcpu,VRN0<<VRN_SHIFT);
+    vcpu_get_rr(vcpu,VRN0<<VRN_SHIFT,&mrr.rrval);
     ia64_set_rr(VRN0<<VRN_SHIFT, vmx_vrrtomrr(vcpu, mrr.rrval));
     ia64_srlz_d();
-    mrr=vmx_vcpu_rr(vcpu,VRN4<<VRN_SHIFT);
+    vcpu_get_rr(vcpu,VRN4<<VRN_SHIFT,&mrr.rrval);
     ia64_set_rr(VRN4<<VRN_SHIFT, vmx_vrrtomrr(vcpu, mrr.rrval));
     ia64_srlz_d();
     ia64_set_psr(psr);
diff -r 551f7935f79a -r 1abf3783975d xen/arch/ia64/vmx/vmx_process.c
--- a/xen/arch/ia64/vmx/vmx_process.c   Fri Mar 10 15:25:54 2006
+++ b/xen/arch/ia64/vmx/vmx_process.c   Fri Mar 10 15:52:12 2006
@@ -292,10 +292,9 @@
 vmx_hpw_miss(u64 vadr , u64 vec, REGS* regs)
 {
     IA64_PSR vpsr;
-    CACHE_LINE_TYPE type=ISIDE_TLB;
+    int type=ISIDE_TLB;
     u64 vhpt_adr, gppa;
     ISR misr;
-    ia64_rr vrr;
 //    REGS *regs;
     thash_cb_t *vtlb;
     thash_data_t *data;
@@ -330,16 +329,17 @@
         physical_tlb_miss(v, vadr, vec);
         return IA64_FAULT;
     }
-    vrr = vmx_vcpu_rr(v, vadr);
     if(vec == 1) type = ISIDE_TLB;
     else if(vec == 2) type = DSIDE_TLB;
     else panic("wrong vec\n");
 
 //    prepare_if_physical_mode(v);
 
-    if((data=vtlb_lookup_ex(vtlb, vrr.rid, vadr,type))!=0){
-       gppa = (vadr&((1UL<<data->ps)-1))+(data->ppn>>(data->ps-12)<<data->ps);
-        if(v->domain!=dom0&&type==DSIDE_TLB && 
__gpfn_is_io(v->domain,gppa>>PAGE_SHIFT)){
+    if((data=vtlb_lookup(vtlb, vadr,type))!=0){
+//     gppa = (vadr&((1UL<<data->ps)-1))+(data->ppn>>(data->ps-12)<<data->ps);
+//        if(v->domain!=dom0&&type==DSIDE_TLB && 
__gpfn_is_io(v->domain,gppa>>PAGE_SHIFT)){
+        if(v->domain!=dom0 && data->io && type==DSIDE_TLB ){
+               gppa = (vadr&((1UL<<data->ps)-1))+(data->ppn>>(data->ps-12)<<data->ps);
             emulate_io_inst(v, gppa, data->ma);
             return IA64_FAULT;
         }
@@ -353,7 +353,7 @@
         }
         else{
  */
-            thash_vhpt_insert(vtlb->ts->vhpt,data,vadr);
+            thash_vhpt_insert(vtlb->vhpt,data->page_flags, data->itir ,vadr);
 //        }
 //         }
     }else if(type == DSIDE_TLB){
@@ -374,8 +374,7 @@
             }
         } else{
             vmx_vcpu_thash(v, vadr, &vhpt_adr);
-            vrr=vmx_vcpu_rr(v,vhpt_adr);
-            if(vhpt_lookup(vhpt_adr) ||  vtlb_lookup_ex(vtlb, vrr.rid, vhpt_adr, DSIDE_TLB)){
+            if(vhpt_lookup(vhpt_adr) ||  vtlb_lookup(vtlb, vhpt_adr, DSIDE_TLB)){
                 if(vpsr.ic){
                     vcpu_set_isr(v, misr.val);
                     dtlb_fault(v, vadr);
@@ -417,8 +416,7 @@
             return IA64_FAULT;
         } else{
             vmx_vcpu_thash(v, vadr, &vhpt_adr);
-            vrr=vmx_vcpu_rr(v,vhpt_adr);
-            if(vhpt_lookup(vhpt_adr) || vtlb_lookup_ex(vtlb, vrr.rid, vhpt_adr, DSIDE_TLB)){
+            if(vhpt_lookup(vhpt_adr) || vtlb_lookup(vtlb, vhpt_adr, DSIDE_TLB)){
                 if(!vpsr.ic){
                     misr.ni=1;
                 }
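
Note the miss-handler change above: the per-fault __gpfn_is_io() test is
gone because the I/O property is now computed once, at insertion time
(vmx_vcpu_itc_d/vmx_vcpu_itr_d set VTLB_PTE_IO), and cached in the vTLB
entry, so the fast path reduces to:

    /* excerpted from the hunk above */
    if (v->domain != dom0 && data->io && type == DSIDE_TLB) {
        gppa = (vadr & ((1UL << data->ps) - 1)) +
               (data->ppn >> (data->ps - 12) << data->ps);
        emulate_io_inst(v, gppa, data->ma);   /* MMIO: emulate the access */
        return IA64_FAULT;
    }
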
diff -r 551f7935f79a -r 1abf3783975d xen/arch/ia64/vmx/vmx_vcpu.c
--- a/xen/arch/ia64/vmx/vmx_vcpu.c      Fri Mar 10 15:25:54 2006
+++ b/xen/arch/ia64/vmx/vmx_vcpu.c      Fri Mar 10 15:52:12 2006
@@ -204,32 +204,24 @@
 }
 
 
-ia64_rr vmx_vcpu_rr(VCPU *vcpu,UINT64 vadr)
-{
-        return (ia64_rr)VMX(vcpu,vrr[vadr>>61]);
-}
-
 
 IA64FAULT vmx_vcpu_set_rr(VCPU *vcpu, UINT64 reg, UINT64 val)
 {
     ia64_rr oldrr,newrr;
     thash_cb_t *hcb;
     extern void * pal_vaddr;
-    oldrr=vmx_vcpu_rr(vcpu,reg);
+    vcpu_get_rr(vcpu, reg, &oldrr.rrval);
     newrr.rrval=val;
-#if 1
     if(oldrr.ps!=newrr.ps){
         hcb = vmx_vcpu_get_vtlb(vcpu);
         thash_purge_all(hcb);
     }
-#endif
     VMX(vcpu,vrr[reg>>61]) = val;
-
     switch((u64)(reg>>61)) {
     case VRN7:
-       vmx_switch_rr7(vmx_vrrtomrr(vcpu,val),vcpu->domain->shared_info,
+        vmx_switch_rr7(vmx_vrrtomrr(vcpu,val),vcpu->domain->shared_info,
         (void *)vcpu->arch.privregs,
-       ( void *)vcpu->arch.vtlb->ts->vhpt->hash, pal_vaddr );
+        (void *)vcpu->arch.vtlb->vhpt->hash, pal_vaddr );
        break;
     default:
         ia64_set_rr(reg,vmx_vrrtomrr(vcpu,val));
@@ -275,7 +267,7 @@
 u64 vmx_vcpu_get_itir_on_fault(VCPU *vcpu, u64 ifa)
 {
     ia64_rr rr,rr1;
-    rr=vmx_vcpu_rr(vcpu,ifa);
+    vcpu_get_rr(vcpu,ifa,&rr.rrval);
     rr1.rrval=0;
     rr1.ps=rr.ps;
     rr1.rid=rr.rid;
diff -r 551f7935f79a -r 1abf3783975d xen/arch/ia64/vmx/vmx_virt.c
--- a/xen/arch/ia64/vmx/vmx_virt.c      Fri Mar 10 15:25:54 2006
+++ b/xen/arch/ia64/vmx/vmx_virt.c      Fri Mar 10 15:52:12 2006
@@ -572,7 +572,7 @@
    }
 #endif // VMAL_NO_FAULT_CHECK
 
-    return (vmx_vcpu_itr_d(vcpu,pte,itir,ifa,slot));
+    return (vmx_vcpu_itr_d(vcpu,slot,pte,itir,ifa));
 }
 
 IA64FAULT vmx_emul_itr_i(VCPU *vcpu, INST64 inst)
@@ -631,7 +631,7 @@
    }
 #endif // VMAL_NO_FAULT_CHECK
 
-   return (vmx_vcpu_itr_i(vcpu,pte,itir,ifa,slot));
+   return (vmx_vcpu_itr_i(vcpu,slot,pte,itir,ifa));
 }
 
 IA64FAULT itc_fault_check(VCPU *vcpu, INST64 inst, u64 *itir, u64 *ifa,u64 *pte)
@@ -972,7 +972,7 @@
         rsv_reg_field(vcpu);
     }
 #endif  //CHECK_FAULT
-    vmx_vcpu_get_rr(vcpu,r3,&r1);
+    vcpu_get_rr(vcpu,r3,&r1);
     return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
 }
 
diff -r 551f7935f79a -r 1abf3783975d xen/arch/ia64/vmx/vtlb.c
--- a/xen/arch/ia64/vmx/vtlb.c  Fri Mar 10 15:25:54 2006
+++ b/xen/arch/ia64/vmx/vtlb.c  Fri Mar 10 15:52:12 2006
@@ -32,7 +32,7 @@
 #include <asm/tlbflush.h>
 #define  MAX_CCH_LENGTH     40
 
-thash_data_t *__alloc_chain(thash_cb_t *, thash_data_t *);
+thash_data_t *__alloc_chain(thash_cb_t *);
 
 static void cch_mem_init(thash_cb_t *hcb)
 {
@@ -71,126 +71,31 @@
  * Check to see if the address rid:va is translated by the TLB
  */
 
-static int __is_tr_translated(thash_data_t *tlb, u64 rid, u64 va, CACHE_LINE_TYPE cl)
-{
-    u64  size;
-    size = PSIZE(tlb->ps);
-    if(tlb->vadr&(size-1))
-        while(1);
-    if ((tlb->rid == rid) && ((va-tlb->vadr)<size))
-        return 1;
-    else
+static inline int __is_tr_translated(thash_data_t *trp, u64 rid, u64 va)
+{
+    return ((trp->p) && (trp->rid == rid) && ((va-trp->vadr)<PSIZE(trp->ps)));
+}
+
+/*
+ * Only for GUEST TR format.
+ */
+static int
+__is_tr_overlap(thash_data_t *trp, u64 rid, u64 sva, u64 eva)
+{
+    uint64_t sa1, ea1;
+
+    if (!trp->p || trp->rid != rid ) {
         return 0;
-}
-
-/*
- * Only for GUEST TR format.
- */
-static int
-__is_tr_overlap(thash_cb_t *hcb,thash_data_t *entry,int rid, char cl, u64 sva, u64 eva)
-{
-    uint64_t size, sa1, ea1;
-
-//    if ( entry->invalid || entry->rid != rid || (entry->cl != cl ) ) {
-    if ( entry->invalid || entry->rid != rid ) {
-        return 0;
-    }
-    size = PSIZE(entry->ps);
-    sa1 = entry->vadr;
-    ea1 = sa1 + size -1;
+    }
+    sa1 = trp->vadr;
+    ea1 = sa1 + PSIZE(trp->ps) -1;
     eva -= 1;
-    if(sa1&(size-1))
-        while(1);
     if ( (sva>ea1) || (sa1>eva) )
         return 0;
     else
         return 1;
 
 }
-
-static void __rem_tr (thash_cb_t *hcb, thash_data_t *tr)
-{
-/*
-    if ( hcb->remove_notifier ) {
-        (hcb->remove_notifier)(hcb,tr);
-    }
-*/
-    tr->invalid = 1;
-}
-
-static inline void __set_tr (thash_data_t *tr, thash_data_t *data, int idx)
-{
-    *tr = *data;
-    tr->tr_idx = idx;
-}
-
-
-static void __init_tr(thash_cb_t *hcb)
-{
-    int i;
-    thash_data_t *tr;
-
-    for ( i=0, tr = &ITR(hcb,0); i<NITRS; i++ ) {
-        tr[i].invalid = 1;
-    }
-    for ( i=0, tr = &DTR(hcb,0); i<NDTRS; i++ ) {
-        tr[i].invalid = 1;
-    }
-}
-
-/*
- * Replace TR entry.
- */
-static void rep_tr(thash_cb_t *hcb,thash_data_t *insert, int idx)
-{
-    thash_data_t *tr;
-
-    if ( insert->cl == ISIDE_TLB ) {
-        tr = &ITR(hcb,idx);
-    }
-    else {
-        tr = &DTR(hcb,idx);
-    }
-    if ( !INVALID_TR(tr) ) {
-        __rem_tr(hcb, tr);
-    }
-    __set_tr (tr, insert, idx);
-}
-
-/*
- * remove TR entry.
- */
-/*
-static void rem_tr(thash_cb_t *hcb,CACHE_LINE_TYPE cl, int idx)
-{
-    thash_data_t *tr;
-
-    if ( cl == ISIDE_TLB ) {
-        tr = &ITR(hcb,idx);
-    }
-    else {
-        tr = &DTR(hcb,idx);
-    }
-    if ( !INVALID_TR(tr) ) {
-        __rem_tr(hcb, tr);
-    }
-}
- */
-/*
- * Delete an thash entry in collision chain.
- *  prev: the previous entry.
- *  rem: the removed entry.
- */
-/*
-static void __rem_chain(thash_cb_t *hcb, thash_data_t *prev, thash_data_t *rem)
-{
-    //prev->next = rem->next;
-    if ( hcb->remove_notifier ) {
-         (hcb->remove_notifier)(hcb,rem);
-    }
-    cch_free (hcb, rem);
-}
- */
 
 /*
  * Delete an thash entry leading collision chain.
@@ -212,69 +117,35 @@
     }
 }
 
-thash_data_t *__vtr_lookup(thash_cb_t *hcb,
-            u64 rid, u64 va,
-            CACHE_LINE_TYPE cl)
-{
-    thash_data_t    *tr;
-    int   num,i;
-
-    if ( cl == ISIDE_TLB ) {
-        tr = &ITR(hcb,0);
-        num = NITRS;
+thash_data_t *__vtr_lookup(VCPU *vcpu, u64 va, int is_data)
+{
+
+    thash_data_t  *trp;
+    int  i;
+    u64 rid;
+    vcpu_get_rr(vcpu, va, &rid);
+    rid = rid&RR_RID_MASK;;
+    if (is_data) {
+        if (vcpu_quick_region_check(vcpu->arch.dtr_regions,va)) {
+            for (trp =(thash_data_t *) vcpu->arch.dtrs,i=0; i<NDTRS; i++, trp++) {
+                if (__is_tr_translated(trp, rid, va)) {
+                    return trp;
+                }
+            }
+        }
     }
     else {
-        tr = &DTR(hcb,0);
-        num = NDTRS;
-    }
-    for ( i=0; i<num; i++ ) {
-        if ( !INVALID_TR(&tr[i]) &&
-            __is_tr_translated(&tr[i], rid, va, cl) )
-            return &tr[i];
+        if (vcpu_quick_region_check(vcpu->arch.itr_regions,va)) {
+            for (trp =(thash_data_t *) vcpu->arch.itrs,i=0; i<NITRS; i++, trp++) {
+                if (__is_tr_translated(trp, rid, va)) {
+                    return trp;
+                }
+            }
+        }
     }
     return NULL;
 }
 
-
-/*
- * Find overlap VHPT entry within current collision chain
- * base on internal priv info.
- */
-/*
-static inline thash_data_t* _vhpt_next_overlap_in_chain(thash_cb_t *hcb)
-{
-    thash_data_t    *cch;
-    thash_internal_t *priv = &hcb->priv;
-
-
-    for (cch=priv->cur_cch; cch; cch = cch->next) {
-        if ( priv->tag == cch->etag  ) {
-            return cch;
-        }
-    }
-    return NULL;
-}
-*/
-/*
- * Find overlap TLB/VHPT entry within current collision chain
- * base on internal priv info.
- */
-/*
-static thash_data_t *_vtlb_next_overlap_in_chain(thash_cb_t *hcb)
-{
-    thash_data_t    *cch;
-    thash_internal_t *priv = &hcb->priv;
-
-    // Find overlap TLB entry
-    for (cch=priv->cur_cch; cch; cch = cch->next) {
-        if ( ( cch->tc ? priv->s_sect.tc : priv->s_sect.tr )  &&
-            __is_translated( cch, priv->rid, priv->_curva, priv->cl)) {
-            return cch;
-        }
-    }
-    return NULL;
-}
- */
 
 /*
  * Get the machine format of VHPT entry.
@@ -292,24 +163,16 @@
  *  0/1: means successful or fail.
  *
  */
-int __tlb_to_vhpt(thash_cb_t *hcb,
-            thash_data_t *tlb, u64 va,
-            thash_data_t *vhpt)
+int __tlb_to_vhpt(thash_cb_t *hcb, thash_data_t *vhpt, u64 va)
 {
     u64 padr,pte;
-//    ia64_rr vrr;
     ASSERT ( hcb->ht == THASH_VHPT );
-//    vrr = (hcb->get_rr_fn)(hcb->vcpu,va);
-    padr = tlb->ppn >>(tlb->ps-ARCH_PAGE_SHIFT)<<tlb->ps;
-    padr += va&((1UL<<tlb->ps)-1);
+    padr = vhpt->ppn >>(vhpt->ps-ARCH_PAGE_SHIFT)<<vhpt->ps;
+    padr += va&((1UL<<vhpt->ps)-1);
     pte=lookup_domain_mpa(current->domain,padr);
     if((pte>>56))
         return 0;
-    // TODO with machine discontinuous address space issue.
     vhpt->etag = ia64_ttag(va);
-    //vhpt->ti = 0;
-    vhpt->itir = tlb->itir & ~ITIR_RV_MASK;
-    vhpt->page_flags = tlb->page_flags & ~PAGE_FLAGS_RV_MASK;
     vhpt->ps = PAGE_SHIFT;
     vhpt->ppn = 
(pte&((1UL<<IA64_MAX_PHYS_BITS)-(1UL<<PAGE_SHIFT)))>>ARCH_PAGE_SHIFT;
     vhpt->next = 0;
@@ -331,17 +194,20 @@
 
 /*  vhpt only has entries with PAGE_SIZE page size */
 
-void thash_vhpt_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va)
+void thash_vhpt_insert(thash_cb_t *hcb, u64 pte, u64 itir, u64 ifa)
 {
     thash_data_t   vhpt_entry, *hash_table, *cch;
+    vhpt_entry.page_flags = pte & ~PAGE_FLAGS_RV_MASK;
+    vhpt_entry.itir=itir;
+
 //    ia64_rr vrr;
 
-    if ( !__tlb_to_vhpt(hcb, entry, va, &vhpt_entry) ) {
+    if ( !__tlb_to_vhpt(hcb, &vhpt_entry, ifa) ) {
         return;
     //panic("Can't convert to machine VHPT entry\n");
     }
 
-    hash_table = (thash_data_t *)ia64_thash(va);
+    hash_table = (thash_data_t *)ia64_thash(ifa);
     if( INVALID_VHPT(hash_table) ) {
         *hash_table = vhpt_entry;
         hash_table->next = 0;
@@ -358,6 +224,7 @@
         }
         cch = cch->next;
     }
+
     if(hash_table->len>=MAX_CCN_DEPTH){
        thash_remove_cch(hcb, hash_table);
        cch = cch_alloc(hcb);
@@ -367,9 +234,9 @@
         hash_table->next = cch;
        return;
     }
-       
+
     // TODO: Add collision chain length limitation.
-     cch = __alloc_chain(hcb,entry);
+     cch = __alloc_chain(hcb);
      if(cch == NULL){
            *hash_table = vhpt_entry;
             hash_table->next = 0;
@@ -377,10 +244,8 @@
             *cch = *hash_table;
             *hash_table = vhpt_entry;
             hash_table->next = cch;
-           hash_table->len = cch->len + 1;
-           cch->len = 0;       
-//            if(hash_table->tag==hash_table->next->tag)
-//                while(1);
+           hash_table->len = cch->len + 1;
+           cch->len = 0;
 
     }
     return /*hash_table*/;
@@ -414,7 +279,7 @@
     thash_data_t *hash_table, *prev, *next;
     u64 start, end, size, tag, rid;
     ia64_rr vrr;
-    vrr=vmx_vcpu_rr(current, va);
+    vcpu_get_rr(current, va, &vrr.rrval);
     rid = vrr.rid;
     size = PSIZE(ps);
     start = va & (-size);
@@ -480,36 +345,6 @@
     }
     machine_tlb_purge(va, ps);
 }
-/*
- * Insert an entry to hash table. 
- *    NOTES:
- *  1: TLB entry may be TR, TC or Foreign Map. For TR entry,
- *     itr[]/dtr[] need to be updated too.
- *  2: Inserting to collision chain may trigger recycling if 
- *     the buffer for collision chain is empty.
- *  3: The new entry is inserted at the next of hash table.
- *     (I.e. head of the collision chain)
- *  4: The buffer holding the entry is allocated internally
- *     from cch_buf or just in the hash table.
- *  5: Return the entry in hash table or collision chain.
- *  6: Input parameter, entry, should be in TLB format.
- *      I.e. Has va, rid, ps...
- *  7: This API is invoked by emulating ITC/ITR and tlb_miss.
- *
- */
-
-void thash_tr_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va, int idx)
-{
-    if ( hcb->ht != THASH_TLB || entry->tc ) {
-        panic("wrong parameter\n");
-    }
-    entry->vadr = PAGEALIGN(entry->vadr,entry->ps);
-    entry->ppn = PAGEALIGN(entry->ppn, entry->ps-12);
-    rep_tr(hcb, entry, idx);
-//    thash_vhpt_insert(hcb->ts->vhpt, entry, va);
-    return ;
-}
-
 
 /*
  * Recycle all collisions chain in VTLB or VHPT.
@@ -525,30 +360,13 @@
         thash_remove_cch(hcb,hash_table);
     }
 }
-/*
-thash_data_t *vtlb_alloc_chain(thash_cb_t *hcb,thash_data_t *entry)
+
+thash_data_t *__alloc_chain(thash_cb_t *hcb)
 {
     thash_data_t *cch;
 
     cch = cch_alloc(hcb);
     if(cch == NULL){
-        thash_recycle_cch(hcb);
-        cch = cch_alloc(hcb);
-    }
-    return cch;
-}
-*/
-
-thash_data_t *__alloc_chain(thash_cb_t *hcb,thash_data_t *entry)
-{
-    thash_data_t *cch;
-
-    cch = cch_alloc(hcb);
-    if(cch == NULL){
-        // recycle
-//        if ( hcb->recycle_notifier ) {
-//                hcb->recycle_notifier(hcb,(u64)entry);
-//        }
         thash_recycle_cch(hcb);
         cch = cch_alloc(hcb);
     }
@@ -564,474 +382,117 @@
  *  3: The caller need to make sure the new entry will not overlap
  *     with any existed entry.
  */
-void vtlb_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va)
+void vtlb_insert(thash_cb_t *hcb, u64 pte, u64 itir, u64 va)
 {
     thash_data_t    *hash_table, *cch;
     /* int flag; */
     ia64_rr vrr;
     /* u64 gppn, ppns, ppne; */
-    u64 tag;
-    vrr=vmx_vcpu_rr(current, va);
-    if (vrr.ps != entry->ps) {
+    u64 tag, ps;
+    ps = itir_ps(itir);
+    vcpu_get_rr(current, va, &vrr.rrval);
+    if (vrr.ps != ps) {
 //        machine_tlb_insert(hcb->vcpu, entry);
        panic("not preferred ps with va: 0x%lx\n", va);
        return;
     }
-    entry->vadr = PAGEALIGN(entry->vadr,entry->ps);
-    entry->ppn = PAGEALIGN(entry->ppn, entry->ps-12);
     hash_table = vsa_thash(hcb->pta, va, vrr.rrval, &tag);
-    entry->etag = tag;
     if( INVALID_TLB(hash_table) ) {
-        *hash_table = *entry;
+        hash_table->page_flags = pte;
+        hash_table->itir=itir;
+        hash_table->etag=tag;
         hash_table->next = 0;
     }
     else if (hash_table->len>=MAX_CCN_DEPTH){
         thash_remove_cch(hcb, hash_table);
         cch = cch_alloc(hcb);
         *cch = *hash_table;
-        *hash_table = *entry;
+        hash_table->page_flags = pte;
+        hash_table->itir=itir;
+        hash_table->etag=tag;
         hash_table->len = 1;
         hash_table->next = cch;
     }
+
     else {
         // TODO: Add collision chain length limitation.
-        cch = __alloc_chain(hcb,entry);
+        cch = __alloc_chain(hcb);
         if(cch == NULL){
-            *hash_table = *entry;
+            hash_table->page_flags = pte;
+            hash_table->itir=itir;
+            hash_table->etag=tag;
             hash_table->next = 0;
         }else{
             *cch = *hash_table;
-            *hash_table = *entry;
+            hash_table->page_flags = pte;
+            hash_table->itir=itir;
+            hash_table->etag=tag;
             hash_table->next = cch;
             hash_table->len = cch->len + 1;
             cch->len = 0;
         }
     }
-#if 0
-    if(hcb->vcpu->domain->domain_id==0){
-       thash_insert(hcb->ts->vhpt, entry, va);
-        return;
-    }
-#endif
-/*
-    flag = 1;
-    gppn = (POFFSET(va,entry->ps)|PAGEALIGN((entry->ppn<<12),entry->ps))>>PAGE_SHIFT;
-    ppns = PAGEALIGN((entry->ppn<<12),entry->ps);
-    ppne = ppns + PSIZE(entry->ps);
-    if(((ppns<=0xa0000)&&(ppne>0xa0000))||((ppne>0xc0000)&&(ppns<=0xc0000)))
-        flag = 0;
-    if((__gpfn_is_mem(hcb->vcpu->domain, gppn)&&flag))
-       thash_insert(hcb->ts->vhpt, entry, va);
-*/
     return ;
 }
 
 
-/*
-void thash_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va)
-{
-    thash_data_t    *hash_table;
-    ia64_rr vrr;
-    
-    vrr = vmx_vcpu_rr(hcb->vcpu,entry->vadr);
-    if ( entry->ps != vrr.ps && entry->tc ) {
-        panic("Not support for multiple page size now\n");
-    }
-    entry->vadr = PAGEALIGN(entry->vadr,entry->ps);
-    entry->ppn = PAGEALIGN(entry->ppn, entry->ps-12);
-    (hcb->ins_hash)(hcb, entry, va);
-    
-}
-*/
-/*
-static void rem_thash(thash_cb_t *hcb, thash_data_t *entry)
-{
-    thash_data_t    *hash_table, *p, *q;
-    thash_internal_t *priv = &hcb->priv;
-    int idx;
-
-    hash_table = priv->hash_base;
-    if ( hash_table == entry ) {
-//        if ( PURGABLE_ENTRY(hcb, entry) ) {
-            __rem_hash_head (hcb, entry);
-//        }
-        return ;
-    }
-    // remove from collision chain
-    p = hash_table;
-    for ( q=p->next; q; q = p->next ) {
-        if ( q == entry ){
-//            if ( PURGABLE_ENTRY(hcb,q ) ) {
-                p->next = q->next;
-                __rem_chain(hcb, entry);
-                hash_table->len--;
-//            }
-            return ;
-        }
-        p = q;
-    }
-    panic("Entry not existed or bad sequence\n");
-}
-*/
-/*
-static void rem_vtlb(thash_cb_t *hcb, thash_data_t *entry)
-{
-    thash_data_t    *hash_table, *p, *q;
-    thash_internal_t *priv = &hcb->priv;
-    int idx;
-    
-    if ( !entry->tc ) {
-        return rem_tr(hcb, entry->cl, entry->tr_idx);
-    }
-    rem_thash(hcb, entry);
-}    
-*/
-int   cch_depth=0;
-/*
- * Purge the collision chain starting from cch.
- * NOTE:
- *     For those UN-Purgable entries(FM), this function will return
- * the head of left collision chain.
- */
-/*
-static thash_data_t *thash_rem_cch(thash_cb_t *hcb, thash_data_t *cch)
-{
-    thash_data_t *next;
-
-//    if ( ++cch_depth > MAX_CCH_LENGTH ) {
-//        printf ("cch length > MAX_CCH_LENGTH, exceed the expected length\n");
-//        while(1);
-//   }
-    if ( cch -> next ) {
-        next = thash_rem_cch(hcb, cch->next);
+int vtr_find_overlap(VCPU *vcpu, u64 va, u64 ps, int is_data)
+{
+    thash_data_t  *trp;
+    int  i;
+    u64 end, rid;
+    vcpu_get_rr(vcpu, va, &rid);
+    rid = rid&RR_RID_MASK;;
+    end = va + PSIZE(ps);
+    if (is_data) {
+        if (vcpu_quick_region_check(vcpu->arch.dtr_regions,va)) {
+            for (trp =(thash_data_t *) vcpu->arch.dtrs,i=0; i<NDTRS; i++, trp++) {
+                if (__is_tr_overlap(trp, rid, va, end )) {
+                    return i;
+                }
+            }
+        }
     }
     else {
-        next = NULL;
-    }
-    if ( PURGABLE_ENTRY(hcb, cch) ) {
-        __rem_chain(hcb, cch);
-        return next;
-    }
-    else {
-        cch->next = next;
-        return cch;
-    }
-}
- */
-
-/*
- * Purge one hash line (include the entry in hash table).
- * Can only be called by thash_purge_all.
- * Input:
- *  hash: The head of collision chain (hash table)
- *
- */
-/*
-static void thash_rem_line(thash_cb_t *hcb, thash_data_t *hash)
-{
-    if ( INVALID_ENTRY(hcb, hash) ) return;
-
-    if ( hash->next ) {
-        cch_depth = 0;
-        hash->next = thash_rem_cch(hcb, hash->next);
-    }
-    // Then hash table itself.
-    if ( PURGABLE_ENTRY(hcb, hash) ) {
-        __rem_hash_head(hcb, hash);
-    }
-}
- */
-
-/*
- * Find an overlap entry in hash table and its collision chain.
- * Refer to SDM2 4.1.1.4 for overlap definition.
- *    PARAS:
- *  1: in: TLB format entry, rid:ps must be same with vrr[].
- *         va & ps identify the address space for overlap lookup
- *  2: section can be combination of TR, TC and FM. (THASH_SECTION_XX)
- *  3: cl means I side or D side.
- *    RETURNS:
- *  NULL to indicate the end of findings.
- *    NOTES:
- *
- */
-
-/*
-thash_data_t *thash_find_overlap(thash_cb_t *hcb,
-            thash_data_t *in, search_section_t s_sect)
-{
-    return (hcb->find_overlap)(hcb, in->vadr,
-            PSIZE(in->ps), in->rid, in->cl, s_sect);
-}
-*/
-
-/*
-static thash_data_t *vtlb_find_overlap(thash_cb_t *hcb,
-        u64 va, u64 size, int rid, char cl, search_section_t s_sect)
-{
-    thash_data_t    *hash_table;
-    thash_internal_t *priv = &hcb->priv;
-    u64     tag;
-    ia64_rr vrr;
-
-    priv->_curva = va & ~(size-1);
-    priv->_eva = priv->_curva + size;
-    priv->rid = rid;
-    vrr = vmx_vcpu_rr(hcb->vcpu,va);
-    priv->ps = vrr.ps;
-    hash_table = vsa_thash(hcb->pta, priv->_curva, vrr.rrval, &tag);
-    priv->s_sect = s_sect;
-    priv->cl = cl;
-    priv->_tr_idx = 0;
-    priv->hash_base = hash_table;
-    priv->cur_cch = hash_table;
-    return (hcb->next_overlap)(hcb);
-}
-*/
-
-/*
-static thash_data_t *vhpt_find_overlap(thash_cb_t *hcb,
-        u64 va, u64 size, int rid, char cl, search_section_t s_sect)
-{
-    thash_data_t    *hash_table;
-    thash_internal_t *priv = &hcb->priv;
-    u64     tag;
-    ia64_rr vrr;
-
-    priv->_curva = va & ~(size-1);
-    priv->_eva = priv->_curva + size;
-    priv->rid = rid;
-    vrr = vmx_vcpu_rr(hcb->vcpu,va);
-    priv->ps = vrr.ps;
-    hash_table = ia64_thash(priv->_curva);
-    tag = ia64_ttag(priv->_curva);
-    priv->tag = tag;
-    priv->hash_base = hash_table;
-    priv->cur_cch = hash_table;
-    return (hcb->next_overlap)(hcb);
-}
-*/
-
-
-thash_data_t *vtr_find_overlap(thash_cb_t *hcb, thash_data_t *data, char cl)
-{
-    thash_data_t    *tr;
-    int  i,num;
-    u64 end;
-
-    if (cl == ISIDE_TLB ) {
-        num = NITRS;
-        tr = &ITR(hcb,0);
-    }
-    else {
-        num = NDTRS;
-        tr = &DTR(hcb,0);
-    }
-    end=data->vadr + PSIZE(data->ps);
-    for (i=0; i<num; i++ ) {
-        if ( __is_tr_overlap(hcb, &tr[i], data->rid, cl, data->vadr, end )) {
-            return &tr[i];
-        }
-    }
-    return NULL;
-}
-
-
-/*
-static thash_data_t *vtr_find_next_overlap(thash_cb_t *hcb)
-{
-    thash_data_t    *tr;
-    thash_internal_t *priv = &hcb->priv;
-    int   num;
-
-    if ( priv->cl == ISIDE_TLB ) {
-        num = NITRS;
-        tr = &ITR(hcb,0);
-    }
-    else {
-        num = NDTRS;
-        tr = &DTR(hcb,0);
-    }
-    for (; priv->_tr_idx < num; priv->_tr_idx ++ ) {
-        if ( __is_tr_overlap(hcb, &tr[priv->_tr_idx],
-                priv->rid, priv->cl,
-                priv->_curva, priv->_eva) ) {
-            return &tr[priv->_tr_idx++];
-        }
-    }
-    return NULL;
-}
-*/
-
-/*
- * Similar with vtlb_next_overlap but find next entry.
- *    NOTES:
- *  Intermediate position information is stored in hcb->priv.
- */
-/*
-static thash_data_t *vtlb_next_overlap(thash_cb_t *hcb)
-{
-    thash_data_t    *ovl;
-    thash_internal_t *priv = &hcb->priv;
-    u64 addr,rr_psize,tag;
-    ia64_rr vrr;
-
-    if ( priv->s_sect.tr ) {
-        ovl = vtr_find_next_overlap (hcb);
-        if ( ovl ) return ovl;
-        priv->s_sect.tr = 0;
-    }
-    if ( priv->s_sect.v == 0 ) return NULL;
-    vrr = vmx_vcpu_rr(hcb->vcpu,priv->_curva);
-    rr_psize = PSIZE(vrr.ps);
-
-    while ( priv->_curva < priv->_eva ) {
-        if ( !INVALID_ENTRY(hcb, priv->hash_base) ) {
-            ovl = _vtlb_next_overlap_in_chain(hcb);
-            if ( ovl ) {
-                priv->cur_cch = ovl->next;
-                return ovl;
+        if (vcpu_quick_region_check(vcpu->arch.itr_regions,va)) {
+            for (trp =(thash_data_t *) vcpu->arch.itrs,i=0; i<NITRS; i++, trp++) {
+                if (__is_tr_overlap(trp, rid, va, end )) {
+                    return i;
+                }
             }
         }
-        priv->_curva += rr_psize;
-        priv->hash_base = vsa_thash( hcb->pta, priv->_curva, vrr.rrval, &tag);
-        priv->cur_cch = priv->hash_base;
-    }
-    return NULL;
-}
- */
-
-
-/*
-static thash_data_t *vhpt_next_overlap(thash_cb_t *hcb)
-{
-    thash_data_t    *ovl;
-    thash_internal_t *priv = &hcb->priv;
-    u64 addr,rr_psize;
-    ia64_rr vrr;
-
-    vrr = vmx_vcpu_rr(hcb->vcpu,priv->_curva);
-    rr_psize = PSIZE(vrr.ps);
-
-    while ( priv->_curva < priv->_eva ) {
-        if ( !INVALID_ENTRY(hcb, priv->hash_base) ) {
-            ovl = _vhpt_next_overlap_in_chain(hcb);
-            if ( ovl ) {
-                priv->cur_cch = ovl->next;
-                return ovl;
-            }
-        }
-        priv->_curva += rr_psize;
-        priv->hash_base = ia64_thash(priv->_curva);
-        priv->tag = ia64_ttag(priv->_curva);
-        priv->cur_cch = priv->hash_base;
-    }
-    return NULL;
-}
-*/
-
-/*
- * Find and purge overlap entries in hash table and its collision chain.
- *    PARAS:
- *  1: in: TLB format entry, rid:ps must be same with vrr[].
- *         rid, va & ps identify the address space for purge
- *  2: section can be combination of TR, TC and FM. (thash_SECTION_XX)
- *  3: cl means I side or D side.
- *    NOTES:
- *
- */
-void thash_purge_entries(thash_cb_t *hcb,
-            thash_data_t *in, search_section_t p_sect)
-{
-    return thash_purge_entries_ex(hcb, in->rid, in->vadr,
-            in->ps, p_sect, in->cl);
-}
-
-void thash_purge_entries_ex(thash_cb_t *hcb,
-            u64 rid, u64 va, u64 ps,
-            search_section_t p_sect,
-            CACHE_LINE_TYPE cl)
-{
-/*
-    thash_data_t    *ovl;
-
-    ovl = (hcb->find_overlap)(hcb, va, PSIZE(ps), rid, cl, p_sect);
-    while ( ovl != NULL ) {
-        (hcb->rem_hash)(hcb, ovl);
-        ovl = (hcb->next_overlap)(hcb);
-    };
- */
+    }
+    return -1;
+}
+
+/*
+ * Purge entries in VTLB and VHPT
+ */
+void thash_purge_entries(thash_cb_t *hcb, u64 va, u64 ps)
+{
     vtlb_purge(hcb, va, ps);
-    vhpt_purge(hcb->ts->vhpt, va, ps);
-}
+    vhpt_purge(hcb->vhpt, va, ps);
+}
+
 
 /*
  * Purge overlap TCs and then insert the new entry to emulate itc ops.
  *    Notes: Only TC entry can purge and insert.
  */
-void thash_purge_and_insert(thash_cb_t *hcb, thash_data_t *in, u64 va)
-{
-    /* thash_data_t    *ovl; */
-    search_section_t sections;
-
-#ifdef   XEN_DEBUGGER
-    vrr = vmx_vcpu_rr(hcb->vcpu,in->vadr);
-       if ( in->ps != vrr.ps || hcb->ht != THASH_TLB || !in->tc ) {
-               panic ("Oops, wrong call for purge_and_insert\n");
-               return;
-       }
-#endif
-    in->vadr = PAGEALIGN(in->vadr,in->ps);
-    in->ppn = PAGEALIGN(in->ppn, in->ps-12);
-    sections.tr = 0;
-    sections.tc = 1;
-/*
-    ovl = (hcb->find_overlap)(hcb, in->vadr, PSIZE(in->ps),
-                                in->rid, in->cl, sections);
-    if(ovl)
-        (hcb->rem_hash)(hcb, ovl);
- */
-    vtlb_purge(hcb, va, in->ps);
-    vhpt_purge(hcb->ts->vhpt, va, in->ps);
-#ifdef   XEN_DEBUGGER
-    ovl = (hcb->next_overlap)(hcb);
-    if ( ovl ) {
-               panic ("Oops, 2+ overlaps for purge_and_insert\n");
-               return;
-    }
-#endif
-    if(in->ps!=PAGE_SHIFT)
-        vtlb_insert(hcb, in, va);
-    thash_vhpt_insert(hcb->ts->vhpt, in, va);
-}
-/*
- * Purge one hash line (include the entry in hash table).
- * Can only be called by thash_purge_all.
- * Input:
- *  hash: The head of collision chain (hash table)
- *
- */
-/*
-static void thash_purge_line(thash_cb_t *hcb, thash_data_t *hash)
-{
-    if ( INVALID_ENTRY(hcb, hash) ) return;
-    thash_data_t *prev, *next;
-    next=hash->next;
-    while ( next ) {
-        prev=next;
-        next=next->next;
-        cch_free(hcb, prev);
-    }
-    // Then hash table itself.
-    INVALIDATE_HASH(hcb, hash);
-}
-*/
-
-
-
-
-
-
+void thash_purge_and_insert(thash_cb_t *hcb, u64 pte, u64 itir, u64 ifa)
+{
+    u64 ps, va;
+    ps = itir_ps(itir);
+    va = PAGEALIGN(ifa,ps);
+    vtlb_purge(hcb, va, ps);
+    vhpt_purge(hcb->vhpt, va, ps);
+    if((ps!=PAGE_SHIFT)||(pte&VTLB_PTE_IO))
+        vtlb_insert(hcb, pte, itir, va);
+    if(!(pte&VTLB_PTE_IO)){
+        va = PAGEALIGN(ifa,PAGE_SHIFT);
+        thash_vhpt_insert(hcb->vhpt, pte, itir, va);
+    }
+}
 
 
 
@@ -1064,27 +525,12 @@
     }
     cch_mem_init (hcb);
 
-    vhpt = hcb->ts->vhpt;
+    vhpt = hcb->vhpt;
     hash_table = (thash_data_t*)((u64)vhpt->hash + vhpt->hash_sz);
     for (--hash_table;(u64)hash_table >= (u64)vhpt->hash;hash_table--) {
         INVALIDATE_VHPT_HEADER(hash_table);
     }
     cch_mem_init (vhpt);
-    
-/*
-    entry = &hcb->ts->itr[0];
-    for(i=0; i< (NITRS+NDTRS); i++){
-        if(!INVALID_TLB(entry)){
-            start=entry->vadr & (-PSIZE(entry->ps));
-            end = start + PSIZE(entry->ps);
-            while(start<end){
-                thash_vhpt_insert(vhpt, entry, start);
-                start += PAGE_SIZE;
-            }
-        }
-        entry++;
-    }
-*/
     local_flush_tlb_all();
 }
 
@@ -1096,100 +542,32 @@
  * INPUT:
  *  in: TLB format for both VHPT & TLB.
  */
-thash_data_t *vtlb_lookup(thash_cb_t *hcb, 
-            thash_data_t *in)
-{
-    return vtlb_lookup_ex(hcb, in->rid, in->vadr, in->cl);
-}
-
-thash_data_t *vtlb_lookup_ex(thash_cb_t *hcb, 
-            u64 rid, u64 va,
-            CACHE_LINE_TYPE cl)
+
+thash_data_t *vtlb_lookup(thash_cb_t *hcb, u64 va,int is_data)
 {
     thash_data_t    *hash_table, *cch;
     u64     tag;
     ia64_rr vrr;
-   
+
     ASSERT ( hcb->ht == THASH_TLB );
-    
-    cch = __vtr_lookup(hcb, rid, va, cl);;
+
+    cch = __vtr_lookup(hcb->vcpu, va, is_data);;
     if ( cch ) return cch;
 
-    vrr = vmx_vcpu_rr(hcb->vcpu,va);
+    vcpu_get_rr(hcb->vcpu,va,&vrr.rrval);
     hash_table = vsa_thash( hcb->pta, va, vrr.rrval, &tag);
 
     if ( INVALID_ENTRY(hcb, hash_table ) )
         return NULL;
 
-        
+
     for (cch=hash_table; cch; cch = cch->next) {
-//        if ( __is_translated(cch, rid, va, cl) )
         if(cch->etag == tag)
             return cch;
     }
     return NULL;
 }
 
-/*
- * Lock/Unlock TC if found.
- *     NOTES: Only the page in prefered size can be handled.
- *   return:
- *          1: failure
- *          0: success
- */
-/*
-int thash_lock_tc(thash_cb_t *hcb, u64 va, u64 size, int rid, char cl, int lock)
-{
-       thash_data_t    *ovl;
-       search_section_t        sections;
-
-    sections.tr = 1;
-    sections.tc = 1;
-       ovl = (hcb->find_overlap)(hcb, va, size, rid, cl, sections);
-       if ( ovl ) {
-               if ( !ovl->tc ) {
-//                     panic("Oops, TR for lock\n");
-                       return 0;
-               }
-               else if ( lock ) {
-                       if ( ovl->locked ) {
-                               DPRINTK("Oops, already locked entry\n");
-                       }
-                       ovl->locked = 1;
-               }
-               else if ( !lock ) {
-                       if ( !ovl->locked ) {
-                               DPRINTK("Oops, already unlocked entry\n");
-                       }
-                       ovl->locked = 0;
-               }
-               return 0;
-       }
-       return 1;
-}
-*/
-
-/*
- * Notifier when TLB is deleted from hash table and its collision chain.
- * NOTES:
- *  The typical situation is that TLB remove needs to inform
- * VHPT to remove too.
- * PARAS:
- *  1: hcb is TLB object.
- *  2: The format of entry is always in TLB.
- *
- */
-//void tlb_remove_notifier(thash_cb_t *hcb, thash_data_t *entry)
-//{
-//    vhpt_purge(hcb->ts->vhpt,entry->vadr,entry->ps);
-//    thash_cb_t  *vhpt;
-    
-//    search_section_t    s_sect;
-    
-//    s_sect.v = 0;
-//    thash_purge_entries(hcb->ts->vhpt, entry, s_sect);
-//    machine_tlb_purge(entry->vadr, entry->ps);
-//}
 
 /*
  * Initialize internal control data before service.
@@ -1206,28 +584,15 @@
     hcb->pta.size = sz;
 //    hcb->get_rr_fn = vmmu_get_rr;
     ASSERT ( hcb->hash_sz % sizeof(thash_data_t) == 0 );
-    if ( hcb->ht == THASH_TLB ) {
-//        hcb->remove_notifier =  NULL;        //tlb_remove_notifier;
-//        hcb->find_overlap = vtlb_find_overlap;
-//        hcb->next_overlap = vtlb_next_overlap;
-//        hcb->rem_hash = rem_vtlb;
-//        hcb->ins_hash = vtlb_insert;
-        __init_tr(hcb);
-    }
-    else {
-//        hcb->remove_notifier =  NULL;
-//        hcb->find_overlap = vhpt_find_overlap;
-//        hcb->next_overlap = vhpt_next_overlap;
-//        hcb->rem_hash = rem_thash;
-//        hcb->ins_hash = thash_vhpt_insert;
-    }
     hash_table = (thash_data_t*)((u64)hcb->hash + hcb->hash_sz);
 
     for (--hash_table;(u64)hash_table >= (u64)hcb->hash;hash_table--) {
         INVALIDATE_HASH_HEADER(hcb,hash_table);
     }
 }
+
 #ifdef  VTLB_DEBUG
+/*
 static  u64 cch_length_statistics[MAX_CCH_LENGTH+1];
 u64  sanity_check=0;
 u64 vtlb_chain_sanity(thash_cb_t *vtlb, thash_cb_t *vhpt, thash_data_t *hash)
@@ -1264,7 +629,7 @@
     thash_data_t  *hash, *cch;
     thash_data_t    *ovl;
     search_section_t s_sect;
-    thash_cb_t *vhpt = vtlb->ts->vhpt;
+    thash_cb_t *vhpt = vtlb->vhpt;
     u64   invalid_ratio;
  
     if ( sanity_check == 0 ) return;
@@ -1403,4 +768,5 @@
     }
     printf("End of vTLB dump\n");
 }
+*/
 #endif
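
The new thash_purge_and_insert() above encodes the placement policy in
two tests: any mapping that is not a plain PAGE_SIZE RAM page stays only
in the software vTLB, and every non-I/O mapping is also shadowed into the
machine VHPT at PAGE_SIZE granularity.  Condensed, with names as in the
patch:

    /* restatement of the insert policy, not literal patch code */
    if (ps != PAGE_SHIFT || (pte & VTLB_PTE_IO))
        vtlb_insert(hcb, pte, itir, va);           /* software vTLB only */
    if (!(pte & VTLB_PTE_IO))
        thash_vhpt_insert(hcb->vhpt, pte, itir,
                          PAGEALIGN(ifa, PAGE_SHIFT));
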
diff -r 551f7935f79a -r 1abf3783975d xen/arch/ia64/xen/irq.c
--- a/xen/arch/ia64/xen/irq.c   Fri Mar 10 15:25:54 2006
+++ b/xen/arch/ia64/xen/irq.c   Fri Mar 10 15:52:12 2006
@@ -1338,6 +1338,7 @@
     struct domain *guest[IRQ_MAX_GUESTS];
 } irq_guest_action_t;
 
+/*
 static void __do_IRQ_guest(int irq)
 {
     irq_desc_t         *desc = &irq_desc[irq];
@@ -1353,7 +1354,7 @@
         send_guest_pirq(d, irq);
     }
 }
-
+ */
 int pirq_guest_unmask(struct domain *d)
 {
     irq_desc_t    *desc;
diff -r 551f7935f79a -r 1abf3783975d xen/arch/ia64/xen/process.c
--- a/xen/arch/ia64/xen/process.c       Fri Mar 10 15:25:54 2006
+++ b/xen/arch/ia64/xen/process.c       Fri Mar 10 15:52:12 2006
@@ -1,3 +1,4 @@
+
 /*
  * Miscellaneous process/domain related routines
  * 
@@ -57,9 +58,6 @@
                        IA64_PSR_CPL | IA64_PSR_MC | IA64_PSR_IS | \
                        IA64_PSR_ID | IA64_PSR_DA | IA64_PSR_DD | \
                        IA64_PSR_SS | IA64_PSR_RI | IA64_PSR_ED | IA64_PSR_IA)
-
-#define PSCB(x,y)      VCPU(x,y)
-#define PSCBX(x,y)     x->arch.y
 
 #include <xen/sched-if.h>
 
diff -r 551f7935f79a -r 1abf3783975d xen/arch/ia64/xen/vcpu.c
--- a/xen/arch/ia64/xen/vcpu.c  Fri Mar 10 15:25:54 2006
+++ b/xen/arch/ia64/xen/vcpu.c  Fri Mar 10 15:52:12 2006
@@ -36,8 +36,6 @@
 
 // this def for vcpu_regs won't work if kernel stack is present
 //#define      vcpu_regs(vcpu) ((struct pt_regs *) vcpu->arch.regs
-#define        PSCB(x,y)       VCPU(x,y)
-#define        PSCBX(x,y)      x->arch.y
 
 #define        TRUE    1
 #define        FALSE   0
@@ -66,18 +64,6 @@
 unsigned long phys_translate_count = 0;
 
 unsigned long vcpu_verbose = 0;
-#define verbose(a...) do {if (vcpu_verbose) printf(a);} while(0)
-
-//#define vcpu_quick_region_check(_tr_regions,_ifa)    1
-#define vcpu_quick_region_check(_tr_regions,_ifa)                      \
-       (_tr_regions & (1 << ((unsigned long)_ifa >> 61)))
-#define vcpu_quick_region_set(_tr_regions,_ifa)                                \
-       do {_tr_regions |= (1 << ((unsigned long)_ifa >> 61)); } while (0)
-
-// FIXME: also need to check && (!trp->key || vcpu_pkr_match(trp->key))
-#define vcpu_match_tr_entry(_trp,_ifa,_rid)                            \
-       ((_trp->p && (_trp->rid==_rid) && (_ifa >= _trp->vadr) &&       \
-       (_ifa < (_trp->vadr + (1L<< _trp->ps)) - 1)))
 
 /**************************************************************************
  VCPU general register access routines
@@ -1641,8 +1627,11 @@
 
 IA64FAULT vcpu_get_rr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
 {
-       UINT val = PSCB(vcpu,rrs)[reg>>61];
-       *pval = val;
+       if(VMX_DOMAIN(vcpu)){
+               *pval = VMX(vcpu,vrr[reg>>61]);
+       }else{
+               *pval = PSCB(vcpu,rrs)[reg>>61];
+       }
        return (IA64_NO_FAULT);
 }
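
The merged vcpu_get_rr() above reads the VMX shadow copy (VMX(vcpu,vrr[])) for a VTI domain and the paravirtual copy (PSCB(vcpu,rrs)[]) otherwise, both indexed by the VRN, i.e. the top three bits of the address. A hedged caller sketch (the wrapper below is illustrative only):

    /* Illustrative: fetch the region register covering a virtual address.
     * reg>>61 selects one of the eight region registers (the VRN). */
    static UINT64 rr_covering(VCPU *vcpu, UINT64 va)
    {
        UINT64 rrval;
        (void)vcpu_get_rr(vcpu, va, &rrval); /* works for VMX and para domains */
        return rrval;
    }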
 
diff -r 551f7935f79a -r 1abf3783975d xen/include/asm-ia64/vcpu.h
--- a/xen/include/asm-ia64/vcpu.h       Fri Mar 10 15:25:54 2006
+++ b/xen/include/asm-ia64/vcpu.h       Fri Mar 10 15:52:12 2006
@@ -7,7 +7,6 @@
 //#include "thread.h"
 #include <asm/ia64_int.h>
 #include <public/arch-ia64.h>
-
 typedef        unsigned long UINT64;
 typedef        unsigned int UINT;
 typedef        int BOOLEAN;
@@ -16,7 +15,10 @@
 
 typedef cpu_user_regs_t REGS;
 
-#define VCPU(_v,_x)    _v->arch.privregs->_x
+
+#define VCPU(_v,_x)    (_v->arch.privregs->_x)
+#define PSCB(_v,_x) VCPU(_v,_x)
+#define PSCBX(_v,_x) (_v->arch._x)
 
 #define PRIVOP_ADDR_COUNT
 #ifdef PRIVOP_ADDR_COUNT
@@ -175,4 +177,18 @@
     return (~((1UL << itir_ps(itir)) - 1));
 }
 
+#define verbose(a...) do {if (vcpu_verbose) printf(a);} while(0)
+
+//#define vcpu_quick_region_check(_tr_regions,_ifa) 1
+#define vcpu_quick_region_check(_tr_regions,_ifa)           \
+    (_tr_regions & (1 << ((unsigned long)_ifa >> 61)))
+#define vcpu_quick_region_set(_tr_regions,_ifa)             \
+    do {_tr_regions |= (1 << ((unsigned long)_ifa >> 61)); } while (0)
+
+// FIXME: also need to check && (!trp->key || vcpu_pkr_match(trp->key))
+#define vcpu_match_tr_entry(_trp,_ifa,_rid)             \
+    ((_trp->p && (_trp->rid==_rid) && (_ifa >= _trp->vadr) &&   \
+    (_ifa < (_trp->vadr + (1L<< _trp->ps)) - 1)))
+
+
 #endif
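
The vcpu_quick_region_* macros moved into vcpu.h keep one bit per region (again va>>61), so a TR lookup can bail out before scanning entries with vcpu_match_tr_entry(). Rough usage, with illustrative variable names:

    unsigned long tr_regions = 0;

    /* on TR insert: remember which region received an entry */
    vcpu_quick_region_set(tr_regions, vadr);

    /* on miss: if the region bit is clear, no TR can match ifa */
    if (vcpu_quick_region_check(tr_regions, ifa))
        /* ... scan TRs with vcpu_match_tr_entry(trp, ifa, rid) ... */;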
diff -r 551f7935f79a -r 1abf3783975d xen/include/asm-ia64/vmmu.h
--- a/xen/include/asm-ia64/vmmu.h       Fri Mar 10 15:25:54 2006
+++ b/xen/include/asm-ia64/vmmu.h       Fri Mar 10 15:52:12 2006
@@ -68,11 +68,14 @@
 } search_section_t;
 
 
-typedef enum {
+enum {
         ISIDE_TLB=0,
         DSIDE_TLB=1
-} CACHE_LINE_TYPE;
-
+};
+#define VTLB_PTE_P_BIT      0
+#define VTLB_PTE_IO_BIT     60
+#define VTLB_PTE_IO         (1UL<<VTLB_PTE_IO_BIT)
+#define VTLB_PTE_P         (1UL<<VTLB_PTE_P_BIT)
 typedef struct thash_data {
     union {
         struct {
@@ -86,18 +89,16 @@
             u64 ppn  : 38; // 12-49
             u64 rv2  :  2; // 50-51
             u64 ed   :  1; // 52
-            u64 ig1  :  3; // 53-55
-            u64 len  :  4; // 56-59
-            u64 ig2  :  3; // 60-63
+            u64 ig1  : 11; // 53-63
         };
         struct {
             u64 __rv1 : 53;    // 0-52
             u64 contiguous : 1; //53
             u64 tc : 1;     // 54 TR or TC
-            CACHE_LINE_TYPE cl : 1; // 55 I side or D side cache line
+            u64 cl : 1; // 55 I side or D side cache line
             // next extension to ig1, only for TLB instance
-            u64 __ig1  :  4; // 56-59
-            u64 locked  : 1;   // 60 entry locked or not
+            u64 len  :  4; // 56-59
+            u64 io  : 1;       // 60 entry is for I/O space
+            u64 nomap : 1;   // 61 entry can't be inserted into machine TLB.
             u64 checked : 1; // 62 for VTLB/VHPT sanity check
             u64 invalid : 1; // 63 invalid entry
@@ -112,12 +113,12 @@
             u64 key  : 24; // 8-31
             u64 rv4  : 32; // 32-63
         };
-        struct {
-            u64 __rv3  : 32; // 0-31
+//        struct {
+//            u64 __rv3  : 32; // 0-31
             // next extension to rv4
-            u64 rid  : 24;  // 32-55
-            u64 __rv4  : 8; // 56-63
-        };
+//            u64 rid  : 24;  // 32-55
+//            u64 __rv4  : 8; // 56-63
+//        };
         u64 itir;
     };
     union {
@@ -136,7 +137,8 @@
     };
     union {
         struct thash_data *next;
-        u64  tr_idx;
+        u64  rid;  // only used in guest TR
+//        u64  tr_idx;
     };
 } thash_data_t;
 
@@ -152,7 +154,7 @@
 
 #define INVALID_VHPT(hdata)     ((hdata)->ti)
 #define INVALID_TLB(hdata)      ((hdata)->ti)
-#define INVALID_TR(hdata)      ((hdata)->invalid)
+#define INVALID_TR(hdata)      (!(hdata)->p)
 #define INVALID_ENTRY(hcb, hdata)       INVALID_VHPT(hdata)
 
 /*        ((hcb)->ht==THASH_TLB ? INVALID_TLB(hdata) : INVALID_VHPT(hdata)) */
@@ -199,18 +201,18 @@
 typedef void (REM_THASH_FN)(struct thash_cb *hcb, thash_data_t *entry);
 typedef void (INS_THASH_FN)(struct thash_cb *hcb, thash_data_t *entry, u64 va);
 
-typedef struct tlb_special {
-        thash_data_t     itr[NITRS];
-        thash_data_t     dtr[NDTRS];
-        struct thash_cb  *vhpt;
-} tlb_special_t;
+//typedef struct tlb_special {
+//        thash_data_t     itr[NITRS];
+//        thash_data_t     dtr[NDTRS];
+//        struct thash_cb  *vhpt;
+//} tlb_special_t;
 
 //typedef struct vhpt_cb {
         //u64     pta;    // pta value.
 //        GET_MFN_FN      *get_mfn;
 //        TTAG_FN         *tag_func;
 //} vhpt_special;
-
+/*
 typedef struct thash_internal {
         thash_data_t *hash_base;
         thash_data_t *cur_cch;  // head of overlap search
@@ -227,7 +229,7 @@
         u64     _curva;         // current address to search
         u64     _eva;
 } thash_internal_t;
-
+ */
 #define  THASH_CB_MAGIC         0x55aa00aa55aa55aaUL
 typedef struct thash_cb {
         /* THASH base information */
@@ -243,6 +245,7 @@
         thash_cch_mem_t *cch_freelist;
         struct vcpu *vcpu;
         PTA     pta;
+        struct thash_cb *vhpt;
         /* VTLB/VHPT common information */
 //        FIND_OVERLAP_FN *find_overlap;
 //        FIND_NEXT_OVL_FN *next_overlap;
@@ -251,15 +254,15 @@
 //        REM_NOTIFIER_FN *remove_notifier;
         /* private information */
 //        thash_internal_t  priv;
-        union {
-                tlb_special_t  *ts;
+//        union {
+//                tlb_special_t  *ts;
 //                vhpt_special   *vs;
-        };
+//        };
        // Internal position information, buffer and storage etc. TBD
 } thash_cb_t;
 
-#define ITR(hcb,id)             ((hcb)->ts->itr[id])
-#define DTR(hcb,id)             ((hcb)->ts->dtr[id])
+//#define ITR(hcb,id)             ((hcb)->ts->itr[id])
+//#define DTR(hcb,id)             ((hcb)->ts->dtr[id])
 #define INVALIDATE_HASH_HEADER(hcb,hash)    INVALIDATE_TLB_HEADER(hash)
 /*              \
 {           if ((hcb)->ht==THASH_TLB){            \
@@ -290,10 +293,10 @@
  *      4: Return the entry in hash table or collision chain.
  *
  */
-extern void thash_vhpt_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va);
+extern void thash_vhpt_insert(thash_cb_t *hcb, u64 pte, u64 itir, u64 ifa);
 //extern void thash_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va);
-extern void thash_tr_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va, int idx);
-extern thash_data_t *vtr_find_overlap(thash_cb_t *hcb, thash_data_t *data, char cl);
+//extern void thash_tr_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va, int idx);
+extern int vtr_find_overlap(struct vcpu *vcpu, u64 va, u64 ps, int is_data);
 extern u64 get_mfn(struct domain *d, u64 gpfn);
 /*
  * Force to delete a found entry no matter TR or foreign map for TLB.
@@ -344,13 +347,8 @@
  *    NOTES:
  *
  */
-extern void thash_purge_entries(thash_cb_t *hcb, 
-                        thash_data_t *in, search_section_t p_sect);
-extern void thash_purge_entries_ex(thash_cb_t *hcb,
-                        u64 rid, u64 va, u64 sz, 
-                        search_section_t p_sect, 
-                        CACHE_LINE_TYPE cl);
-extern void thash_purge_and_insert(thash_cb_t *hcb, thash_data_t *in, u64 va);
+extern void thash_purge_entries(thash_cb_t *hcb, u64 va, u64 ps);
+extern void thash_purge_and_insert(thash_cb_t *hcb, u64 pte, u64 itir, u64 ifa);
 
 /*
  * Purge all TCs or VHPT entries including those in Hash table.
@@ -363,10 +361,7 @@
  * covering this address rid:va.
  *
  */
-extern thash_data_t *vtlb_lookup(thash_cb_t *hcb, 
-                        thash_data_t *in);
-extern thash_data_t *vtlb_lookup_ex(thash_cb_t *hcb, 
-                        u64 rid, u64 va,CACHE_LINE_TYPE cl);
+extern thash_data_t *vtlb_lookup(thash_cb_t *hcb,u64 va,int is_data);
 extern int thash_lock_tc(thash_cb_t *hcb, u64 va, u64 size, int rid, char cl, int lock);
 
 
@@ -382,6 +377,15 @@
 extern thash_data_t * vhpt_lookup(u64 va);
 extern void machine_tlb_purge(u64 va, u64 ps);
 
+static inline void vmx_vcpu_set_tr (thash_data_t *trp, u64 pte, u64 itir, u64 va, u64 rid)
+{
+    trp->page_flags = pte;
+    trp->itir = itir;
+    trp->vadr = va;
+    trp->rid = rid;
+}
+
+
 //#define   VTLB_DEBUG
 #ifdef   VTLB_DEBUG
 extern void check_vtlb_sanity(thash_cb_t *vtlb);
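
With rid folded into thash_data_t and INVALID_TR() redefined to test the present bit, recording a guest TR reduces to filling one entry via the new vmx_vcpu_set_tr() inline. A sketch under the assumption that trp already points at the vcpu's TR slot (the caller below is illustrative, not part of the patch):

    /* Illustrative: fill a guest TR entry with the new inline helper.
     * trp is assumed to point into a per-vcpu TR array for the slot. */
    static void record_tr(thash_data_t *trp, u64 pte, u64 itir, u64 va, u64 rid)
    {
        vmx_vcpu_set_tr(trp, pte, itir, va, rid);
        /* The entry is now valid iff the P bit (VTLB_PTE_P, bit 0) is set
         * in pte, since INVALID_TR(trp) tests !trp->p. */
    }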
diff -r 551f7935f79a -r 1abf3783975d xen/include/asm-ia64/vmx_platform.h
--- a/xen/include/asm-ia64/vmx_platform.h       Fri Mar 10 15:25:54 2006
+++ b/xen/include/asm-ia64/vmx_platform.h       Fri Mar 10 15:52:12 2006
@@ -22,7 +22,6 @@
 #include <public/xen.h>
 #include <public/arch-ia64.h>
 #include <asm/hvm/vioapic.h>
-
 struct mmio_list;
 typedef struct virtual_platform_def {
     unsigned long       shared_page_va;
@@ -51,9 +50,8 @@
 } vlapic_t;
 
 extern uint64_t dummy_tmr[];
-#define VCPU(_v,_x)    _v->arch.privregs->_x
-#define VLAPIC_ID(l) (uint16_t)(VCPU((l)->vcpu, lid) >> 16)
-#define VLAPIC_IRR(l) VCPU((l)->vcpu, irr[0])
+#define VLAPIC_ID(l) (uint16_t)(((l)->vcpu->arch.privregs->lid) >> 16)
+#define VLAPIC_IRR(l) ((l)->vcpu->arch.privregs->irr[0])
 struct vlapic* apic_round_robin(struct domain *d, uint8_t dest_mode, uint8_t vector, uint32_t bitmap);
 extern int vmx_vcpu_pend_interrupt(struct vcpu *vcpu, uint8_t vector);
 static inline int vlapic_set_irq(struct vlapic *t, uint8_t vec, uint8_t trig)
diff -r 551f7935f79a -r 1abf3783975d xen/include/asm-ia64/vmx_vcpu.h
--- a/xen/include/asm-ia64/vmx_vcpu.h   Fri Mar 10 15:25:54 2006
+++ b/xen/include/asm-ia64/vmx_vcpu.h   Fri Mar 10 15:52:12 2006
@@ -66,17 +66,13 @@
 extern IA64FAULT vmx_vcpu_cover(VCPU *vcpu);
 extern thash_cb_t *vmx_vcpu_get_vtlb(VCPU *vcpu);
 extern thash_cb_t *vmx_vcpu_get_vhpt(VCPU *vcpu);
-extern ia64_rr vmx_vcpu_rr(VCPU *vcpu,UINT64 vadr);
 extern IA64FAULT vmx_vcpu_set_rr(VCPU *vcpu, UINT64 reg, UINT64 val);
-#if 0
-extern IA64FAULT vmx_vcpu_get_rr(VCPU *vcpu, UINT64 reg, UINT64 *pval);
-#endif
 extern IA64FAULT vmx_vcpu_get_pkr(VCPU *vcpu, UINT64 reg, UINT64 *pval);
 IA64FAULT vmx_vcpu_set_pkr(VCPU *vcpu, UINT64 reg, UINT64 val);
 extern IA64FAULT vmx_vcpu_itc_i(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa);
 extern IA64FAULT vmx_vcpu_itc_d(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa);
-extern IA64FAULT vmx_vcpu_itr_i(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa, UINT64 idx);
-extern IA64FAULT vmx_vcpu_itr_d(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa, UINT64 idx);
+extern IA64FAULT vmx_vcpu_itr_i(VCPU *vcpu, UINT64 slot, UINT64 pte, UINT64 itir, UINT64 ifa);
+extern IA64FAULT vmx_vcpu_itr_d(VCPU *vcpu, UINT64 slot, UINT64 pte, UINT64 itir, UINT64 ifa);
 extern IA64FAULT vmx_vcpu_ptr_d(VCPU *vcpu,UINT64 vadr,UINT64 ps);
 extern IA64FAULT vmx_vcpu_ptr_i(VCPU *vcpu,UINT64 vadr,UINT64 ps);
 extern IA64FAULT vmx_vcpu_ptc_l(VCPU *vcpu, UINT64 vadr, UINT64 ps);
@@ -347,12 +343,14 @@
     *val = vtm_get_itc(vcpu);
     return  IA64_NO_FAULT;
 }
+/*
 static inline
 IA64FAULT vmx_vcpu_get_rr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
 {
     *pval = VMX(vcpu,vrr[reg>>61]);
     return (IA64_NO_FAULT);
 }
+ */
 /**************************************************************************
  VCPU debug breakpoint register access routines
 **************************************************************************/
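
Note the argument-order change for the TR insertion privops above: the slot index moves from the trailing idx parameter to the second position. An emulator that used to call vmx_vcpu_itr_d(vcpu, pte, itir, ifa, idx) would now call (argument values illustrative):

    vmx_vcpu_itr_d(vcpu, slot, pte, itir, ifa);  /* slot now precedes pte */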

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog