
[Xen-changelog] [IA64] cleanup vtlb code



# HG changeset patch
# User awilliam@xxxxxxxxxxx
# Node ID 7c7bcf173f8b44ad706c65b30620b79ad152cd97
# Parent  2d2ef3f4c7470659ea69036ae3a2b4b4833e49fd
[IA64] cleanup vtlb code

This patch cleans up the vtlb code.  The per-vcpu vTLB and VHPT
control blocks (thash_cb_t) are now embedded directly in struct
arch_vcpu instead of being carved out of a single shared allocation,
each hash table gets its own domheap allocation, and the
vmx_vcpu_get_vtlb() indirection is replaced by passing the vcpu
itself.  A new per-vcpu tc_regions bitmap lets lookup and purge paths
skip regions that never received a vTLB TC entry, the unused
thash_cch_mem_t type and the magic/ht/vcpu/vhpt members of thash_cb_t
are retired, and vmx_vrrtomrr() is renamed to vrrtomrr().

Signed-off-by: Anthony Xu <anthony.xu@xxxxxxxxx>
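
The heart of the cleanup is a change of ownership.  A condensed
before/after view of struct arch_vcpu (an illustrative sketch; field
and type names are taken from the asm-ia64/domain.h hunk below):

    /* Before: one contiguous allocation held the VHPT, the vTLB and
     * both thash_cb_t headers; the vcpu kept only a pointer to the
     * vTLB control block, whose ->vhpt field reached the VHPT. */
    struct arch_vcpu {
        /* ... */
        thash_cb_t *vtlb;
    };

    /* After: both control blocks live inside the vcpu, each hash
     * table is allocated separately, and a bitmap records which of
     * the eight ia64 regions currently hold vTLB TC entries. */
    struct arch_vcpu {
        /* ... */
        unsigned long tc_regions;
        thash_cb_t vtlb;
        thash_cb_t vhpt;
    };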

diff -r 2d2ef3f4c747 -r 7c7bcf173f8b xen/arch/ia64/vmx/vmmu.c
--- a/xen/arch/ia64/vmx/vmmu.c  Tue Apr 25 17:05:16 2006 -0600
+++ b/xen/arch/ia64/vmx/vmmu.c  Tue Apr 25 20:53:38 2006 -0600
@@ -128,73 +128,58 @@ purge_machine_tc_by_domid(domid_t domid)
 #endif
 }
 
-static thash_cb_t *init_domain_vhpt(struct vcpu *d, void *vbase, void *vcur)
-{
-//    struct page_info *page;
-    thash_cb_t  *vhpt;
-    PTA pta_value;
-    vcur -= sizeof (thash_cb_t);
-    vhpt = vcur;
-    vhpt->ht = THASH_VHPT;
-    vhpt->vcpu = d;
-    /* Setup guest pta */
-    pta_value.val = 0;
-    pta_value.ve = 1;
-    pta_value.vf = 1;
-    pta_value.size = VCPU_VHPT_SHIFT - 1;    /* 16M*/
-    pta_value.base = ((u64)vbase) >> PTA_BASE_SHIFT;
-    d->arch.arch_vmx.mpta = pta_value.val;
-
-    vhpt->hash = vbase;
-    vhpt->hash_sz = VCPU_VHPT_SIZE/2;
-    vhpt->cch_buf = (void *)(vbase + vhpt->hash_sz);
-    vhpt->cch_sz = (u64)vcur - (u64)vhpt->cch_buf;
-    thash_init(vhpt,VCPU_VHPT_SHIFT-1);
-    return vhpt;
-}
-
-
-
-thash_cb_t *init_domain_tlb(struct vcpu *d)
+static void init_domain_vhpt(struct vcpu *v)
 {
     struct page_info *page;
-    void    *vbase, *vhptbase, *vcur;
-    thash_cb_t  *tlb;
-
+    void * vbase;
     page = alloc_domheap_pages (NULL, VCPU_VHPT_ORDER, 0);
     if ( page == NULL ) {
-        panic("No enough contiguous memory for init_domain_mm\n");
-    }
-    vhptbase = page_to_virt(page);
-    memset(vhptbase, 0, VCPU_VHPT_SIZE);
-    printk("Allocate domain tlb&vhpt at 0x%lx\n", (u64)vhptbase);
-    vbase =vhptbase + VCPU_VHPT_SIZE - VCPU_VTLB_SIZE;
-    vcur = (void*)((u64)vbase + VCPU_VTLB_SIZE);
-    vcur -= sizeof (thash_cb_t);
-    tlb = vcur;
-    tlb->ht = THASH_TLB;
-    tlb->vcpu = d;
-    tlb->vhpt = init_domain_vhpt(d,vhptbase,vbase);
-//    tlb->hash_func = machine_thash;
-    tlb->hash = vbase;
-    tlb->hash_sz = VCPU_VTLB_SIZE/2;
-    tlb->cch_buf = (void *)(vbase + tlb->hash_sz);
-    tlb->cch_sz = (u64)vcur - (u64)tlb->cch_buf;
-//    tlb->recycle_notifier = recycle_message;
-    thash_init(tlb,VCPU_VTLB_SHIFT-1);
-    return tlb;
+        panic("No enough contiguous memory for init_domain_vhpt\n");
+    }
+    vbase = page_to_virt(page);
+    memset(vbase, 0, VCPU_VHPT_SIZE);
+    printk("Allocate domain tlb at 0x%p\n", vbase);
+    
+    VHPT(v,hash) = vbase;
+    VHPT(v,hash_sz) = VCPU_VHPT_SIZE/2;
+    VHPT(v,cch_buf) = (void *)((u64)vbase + VHPT(v,hash_sz));
+    VHPT(v,cch_sz) = VCPU_VHPT_SIZE - VHPT(v,hash_sz);
+    thash_init(&(v->arch.vhpt),VCPU_VHPT_SHIFT-1);
+    v->arch.arch_vmx.mpta = v->arch.vhpt.pta.val;
+}
+
+
+
+void init_domain_tlb(struct vcpu *v)
+{
+    struct page_info *page;
+    void * vbase;
+    init_domain_vhpt(v);
+    page = alloc_domheap_pages (NULL, VCPU_VTLB_ORDER, 0);
+    if ( page == NULL ) {
+        panic("No enough contiguous memory for init_domain_tlb\n");
+    }
+    vbase = page_to_virt(page);
+    memset(vbase, 0, VCPU_VTLB_SIZE);
+    printk("Allocate domain tlb at 0x%p\n", vbase);
+    
+    VTLB(v,hash) = vbase;
+    VTLB(v,hash_sz) = VCPU_VTLB_SIZE/2;
+    VTLB(v,cch_buf) = (void *)((u64)vbase + VTLB(v,hash_sz));
+    VTLB(v,cch_sz) = VCPU_VTLB_SIZE - VTLB(v,hash_sz);
+    thash_init(&(v->arch.vtlb),VCPU_VTLB_SHIFT-1);
 }
 
 void free_domain_tlb(struct vcpu *v)
 {
     struct page_info *page;
-    void *vhptbase;
-    thash_cb_t *tlb;
-
-    if ( v->arch.vtlb ) {
-        tlb = v->arch.vtlb;
-        vhptbase = (void*)((u64)tlb + sizeof (thash_cb_t)) - VCPU_VHPT_SIZE;
-        page = virt_to_page(vhptbase);
+
+    if ( v->arch.vtlb.hash) {
+        page = virt_to_page(v->arch.vtlb.hash);
+        free_domheap_pages(page, VCPU_VTLB_ORDER);
+    }
+    if ( v->arch.vhpt.hash) {
+        page = virt_to_page(v->arch.vhpt.hash);
         free_domheap_pages(page, VCPU_VHPT_ORDER);
     }
 }
@@ -324,17 +309,15 @@ fetch_code(VCPU *vcpu, u64 gip, u64 *cod
     u64     gpip=0;   // guest physical IP
     u64     *vpa;
     thash_data_t    *tlb;
-    thash_cb_t *hcb;
     u64     mfn;
 
     if ( !(VCPU(vcpu, vpsr) & IA64_PSR_IT) ) {   // I-side physical mode
         gpip = gip;
     }
     else {
-           hcb = vmx_vcpu_get_vtlb(vcpu);
-        tlb = vtlb_lookup(hcb, gip, ISIDE_TLB);
+        tlb = vtlb_lookup(vcpu, gip, ISIDE_TLB);
 //        if( tlb == NULL )
-//             tlb = vtlb_lookup(hcb, gip, DSIDE_TLB );
+//             tlb = vtlb_lookup(vcpu, gip, DSIDE_TLB );
         if (tlb)
        gpip = (tlb->ppn >>(tlb->ps-12)<<tlb->ps) | ( gip & (PSIZE(tlb->ps)-1) );
     }
@@ -357,8 +340,6 @@ IA64FAULT vmx_vcpu_itc_i(VCPU *vcpu, UIN
 {
     int slot;
     u64 ps, va;
-    thash_cb_t  *hcb;
-
     ps = itir_ps(itir);
     va = PAGEALIGN(ifa, ps);
     slot = vtr_find_overlap(vcpu, va, ps, ISIDE_TLB);
@@ -367,8 +348,7 @@ IA64FAULT vmx_vcpu_itc_i(VCPU *vcpu, UIN
         panic("Tlb conflict!!");
         return IA64_FAULT;
     }
-    hcb = vmx_vcpu_get_vtlb(vcpu);
-    thash_purge_and_insert(hcb, pte, itir, ifa);
+    thash_purge_and_insert(vcpu, pte, itir, ifa);
     return IA64_NO_FAULT;
 }
 
@@ -376,8 +356,6 @@ IA64FAULT vmx_vcpu_itc_d(VCPU *vcpu, UIN
 {
     int slot;
     u64 ps, va, gpfn;
-    thash_cb_t  *hcb;
-
     ps = itir_ps(itir);
     va = PAGEALIGN(ifa, ps);
     slot = vtr_find_overlap(vcpu, va, ps, DSIDE_TLB);
@@ -386,11 +364,10 @@ IA64FAULT vmx_vcpu_itc_d(VCPU *vcpu, UIN
         panic("Tlb conflict!!");
         return IA64_FAULT;
     }
-    hcb = vmx_vcpu_get_vtlb(vcpu);
     gpfn = (pte & _PAGE_PPN_MASK)>> PAGE_SHIFT;
-    if(__gpfn_is_io(vcpu->domain,gpfn))
+    if(VMX_DOMAIN(vcpu) && __gpfn_is_io(vcpu->domain,gpfn))
         pte |= VTLB_PTE_IO;
-    thash_purge_and_insert(hcb, pte, itir, ifa);
+    thash_purge_and_insert(vcpu, pte, itir, ifa);
     return IA64_NO_FAULT;
 
 }
@@ -402,7 +379,6 @@ IA64FAULT vmx_vcpu_itr_i(VCPU *vcpu, u64
 {
     int index;
     u64 ps, va, rid;
-    thash_cb_t  *hcb;
 
     ps = itir_ps(itir);
     va = PAGEALIGN(ifa, ps);
@@ -412,8 +388,7 @@ IA64FAULT vmx_vcpu_itr_i(VCPU *vcpu, u64
         panic("Tlb conflict!!");
         return IA64_FAULT;
     }
-    hcb = vmx_vcpu_get_vtlb(vcpu);
-    thash_purge_entries(hcb, va, ps);
+    thash_purge_entries(vcpu, va, ps);
     vcpu_get_rr(vcpu, va, &rid);
     rid = rid& RR_RID_MASK;
    vmx_vcpu_set_tr((thash_data_t *)&vcpu->arch.itrs[slot], pte, itir, va, rid);
@@ -426,7 +401,6 @@ IA64FAULT vmx_vcpu_itr_d(VCPU *vcpu, u64
 {
     int index;
     u64 ps, va, gpfn, rid;
-    thash_cb_t  *hcb;
 
     ps = itir_ps(itir);
     va = PAGEALIGN(ifa, ps);
@@ -436,8 +410,7 @@ IA64FAULT vmx_vcpu_itr_d(VCPU *vcpu, u64
         panic("Tlb conflict!!");
         return IA64_FAULT;
     }
-    hcb = vmx_vcpu_get_vtlb(vcpu);
-    thash_purge_entries(hcb, va, ps);
+    thash_purge_entries(vcpu, va, ps);
     gpfn = (pte & _PAGE_PPN_MASK)>> PAGE_SHIFT;
     if(__gpfn_is_io(vcpu->domain,gpfn))
         pte |= VTLB_PTE_IO;
@@ -454,7 +427,6 @@ IA64FAULT vmx_vcpu_ptr_d(VCPU *vcpu,UINT
 {
     int index;
     u64 va;
-    thash_cb_t  *hcb;
 
     va = PAGEALIGN(ifa, ps);
     index = vtr_find_overlap(vcpu, va, ps, DSIDE_TLB);
@@ -462,8 +434,7 @@ IA64FAULT vmx_vcpu_ptr_d(VCPU *vcpu,UINT
         vcpu->arch.dtrs[index].pte.p=0;
         index = vtr_find_overlap(vcpu, va, ps, DSIDE_TLB);
     }
-    hcb = vmx_vcpu_get_vtlb(vcpu);
-    thash_purge_entries(hcb, va, ps);
+    thash_purge_entries(vcpu, va, ps);
     return IA64_NO_FAULT;
 }
 
@@ -471,7 +442,6 @@ IA64FAULT vmx_vcpu_ptr_i(VCPU *vcpu,UINT
 {
     int index;
     u64 va;
-    thash_cb_t  *hcb;
 
     va = PAGEALIGN(ifa, ps);
     index = vtr_find_overlap(vcpu, va, ps, ISIDE_TLB);
@@ -479,26 +449,21 @@ IA64FAULT vmx_vcpu_ptr_i(VCPU *vcpu,UINT
         vcpu->arch.itrs[index].pte.p=0;
         index = vtr_find_overlap(vcpu, va, ps, ISIDE_TLB);
     }
-    hcb = vmx_vcpu_get_vtlb(vcpu);
-    thash_purge_entries(hcb, va, ps);
+    thash_purge_entries(vcpu, va, ps);
     return IA64_NO_FAULT;
 }
 
 IA64FAULT vmx_vcpu_ptc_l(VCPU *vcpu, UINT64 va, UINT64 ps)
 {
-    thash_cb_t  *hcb;
     va = PAGEALIGN(va, ps);
-    hcb = vmx_vcpu_get_vtlb(vcpu);
-    thash_purge_entries(hcb, va, ps);
+    thash_purge_entries(vcpu, va, ps);
     return IA64_NO_FAULT;
 }
 
 
 IA64FAULT vmx_vcpu_ptc_e(VCPU *vcpu, UINT64 va)
 {
-    thash_cb_t  *hcb;
-    hcb = vmx_vcpu_get_vtlb(vcpu);
-    thash_purge_all(hcb);
+    thash_purge_all(vcpu);
     return IA64_NO_FAULT;
 }
 
@@ -554,12 +519,10 @@ IA64FAULT vmx_vcpu_tpa(VCPU *vcpu, UINT6
 IA64FAULT vmx_vcpu_tpa(VCPU *vcpu, UINT64 vadr, UINT64 *padr)
 {
     thash_data_t *data;
-    thash_cb_t  *hcb;
     ISR visr,pt_isr;
     REGS *regs;
     u64 vhpt_adr;
     IA64_PSR vpsr;
-    hcb = vmx_vcpu_get_vtlb(vcpu);
     regs=vcpu_regs(vcpu);
     pt_isr.val=VMX(vcpu,cr_isr);
     visr.val=0;
@@ -570,7 +533,7 @@ IA64FAULT vmx_vcpu_tpa(VCPU *vcpu, UINT6
          visr.ni=1;
     }
     visr.na=1;
-    data = vtlb_lookup(hcb, vadr, DSIDE_TLB);
+    data = vtlb_lookup(vcpu, vadr, DSIDE_TLB);
     if(data){
         if(data->p==0){
             visr.na=1;
@@ -618,7 +581,7 @@ IA64FAULT vmx_vcpu_tpa(VCPU *vcpu, UINT6
         }
         else{
             vmx_vcpu_thash(vcpu, vadr, &vhpt_adr);
-            data = vtlb_lookup(hcb, vhpt_adr, DSIDE_TLB);
+            data = vtlb_lookup(vcpu, vhpt_adr, DSIDE_TLB);
             if(data){
                 if(vpsr.ic){
                     vcpu_set_isr(vcpu, visr.val);
@@ -648,15 +611,13 @@ IA64FAULT vmx_vcpu_tak(VCPU *vcpu, UINT6
 IA64FAULT vmx_vcpu_tak(VCPU *vcpu, UINT64 vadr, UINT64 *key)
 {
     thash_data_t *data;
-    thash_cb_t  *hcb;
     PTA vpta;
     vmx_vcpu_get_pta(vcpu, &vpta.val);
     if(vpta.vf==0 || unimplemented_gva(vcpu, vadr)){
         *key=1;
         return IA64_NO_FAULT;
     }
-    hcb = vmx_vcpu_get_vtlb(vcpu);
-    data = vtlb_lookup(hcb, vadr, DSIDE_TLB);
+    data = vtlb_lookup(vcpu, vadr, DSIDE_TLB);
     if(!data||!data->p){
         *key=1;
     }else{
@@ -688,13 +649,11 @@ __domain_va_to_ma(unsigned long va, unsi
 __domain_va_to_ma(unsigned long va, unsigned long* ma, unsigned long *len)
 {
     unsigned long      mpfn, gpfn, m, n = *len;
-    thash_cb_t         *vtlb;
     unsigned long      end;    /* end of the area mapped by current entry */
     thash_data_t       *entry;
     struct vcpu *v = current;
 
-    vtlb = vmx_vcpu_get_vtlb(v); 
-    entry = vtlb_lookup(vtlb, va, DSIDE_TLB);
+    entry = vtlb_lookup(v, va, DSIDE_TLB);
     if (entry == NULL)
        return -EFAULT;
 
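
Each control block keeps the split layout the old code used, now per
allocation: the first half is the hash table proper and the second
half feeds the collision-chain free list.  A sketch of the arithmetic
performed by init_domain_vhpt()/init_domain_tlb() above, for an
allocation of SZ bytes (VCPU_VHPT_SIZE or VCPU_VTLB_SIZE
respectively; illustration only):

    /*  vbase               vbase + SZ/2                 vbase + SZ
     *    |  hash table        |  collision-chain buffer      |   */
    hash    = vbase;
    hash_sz = SZ / 2;
    cch_buf = (void *)((u64)vbase + hash_sz);
    cch_sz  = SZ - hash_sz;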
diff -r 2d2ef3f4c747 -r 7c7bcf173f8b xen/arch/ia64/vmx/vmx_init.c
--- a/xen/arch/ia64/vmx/vmx_init.c      Tue Apr 25 17:05:16 2006 -0600
+++ b/xen/arch/ia64/vmx/vmx_init.c      Tue Apr 25 20:53:38 2006 -0600
@@ -279,8 +279,9 @@ vmx_final_setup_guest(struct vcpu *v)
        /* Per-domain vTLB and vhpt implementation. Now vmx domain will stick
         * to this solution. Maybe it can be deferred until we know created
         * one as vmx domain */
-       v->arch.vtlb = init_domain_tlb(v);
-
+#ifndef HASH_VHPT     
+        init_domain_tlb(v);
+#endif
        /* v->arch.schedule_tail = arch_vmx_do_launch; */
        vmx_create_vp(v);
 
diff -r 2d2ef3f4c747 -r 7c7bcf173f8b xen/arch/ia64/vmx/vmx_phy_mode.c
--- a/xen/arch/ia64/vmx/vmx_phy_mode.c  Tue Apr 25 17:05:16 2006 -0600
+++ b/xen/arch/ia64/vmx/vmx_phy_mode.c  Tue Apr 25 20:53:38 2006 -0600
@@ -166,9 +166,9 @@ vmx_init_all_rr(VCPU *vcpu)
        VMX(vcpu,vrr[VRN6]) = 0x660;
        VMX(vcpu,vrr[VRN7]) = 0x760;
 #if 0
-       VMX(vcpu,mrr5) = vmx_vrrtomrr(vcpu, 0x38);
-       VMX(vcpu,mrr6) = vmx_vrrtomrr(vcpu, 0x60);
-       VMX(vcpu,mrr7) = vmx_vrrtomrr(vcpu, 0x60);
+       VMX(vcpu,mrr5) = vrrtomrr(vcpu, 0x38);
+       VMX(vcpu,mrr6) = vrrtomrr(vcpu, 0x60);
+       VMX(vcpu,mrr7) = vrrtomrr(vcpu, 0x60);
 #endif
 }
 
@@ -177,8 +177,8 @@ vmx_load_all_rr(VCPU *vcpu)
 {
        unsigned long psr;
        ia64_rr phy_rr;
+
        extern void * pal_vaddr;
-
        local_irq_save(psr);
 
 
@@ -189,37 +189,37 @@ vmx_load_all_rr(VCPU *vcpu)
                if (vcpu->arch.mode_flags & GUEST_PHY_EMUL)
                        panic("Unexpected domain switch in phy emul\n");
                phy_rr.rrval = vcpu->arch.metaphysical_rr0;
-//             phy_rr.ps = PAGE_SHIFT;
+               //phy_rr.ps = PAGE_SHIFT;
                phy_rr.ve = 1;
 
                ia64_set_rr((VRN0 << VRN_SHIFT), phy_rr.rrval);
                phy_rr.rrval = vcpu->arch.metaphysical_rr4;
-//             phy_rr.ps = PAGE_SHIFT;
+               //phy_rr.ps = PAGE_SHIFT;
                phy_rr.ve = 1;
 
                ia64_set_rr((VRN4 << VRN_SHIFT), phy_rr.rrval);
        } else {
                ia64_set_rr((VRN0 << VRN_SHIFT),
-                            vmx_vrrtomrr(vcpu, VMX(vcpu, vrr[VRN0])));
+                            vrrtomrr(vcpu, VMX(vcpu, vrr[VRN0])));
                ia64_set_rr((VRN4 << VRN_SHIFT),
-                            vmx_vrrtomrr(vcpu, VMX(vcpu, vrr[VRN4])));
+                            vrrtomrr(vcpu, VMX(vcpu, vrr[VRN4])));
        }
 
        /* rr567 will be postponed to last point when resuming back to guest */
        ia64_set_rr((VRN1 << VRN_SHIFT),
-                       vmx_vrrtomrr(vcpu, VMX(vcpu, vrr[VRN1])));
+                    vrrtomrr(vcpu, VMX(vcpu, vrr[VRN1])));
        ia64_set_rr((VRN2 << VRN_SHIFT),
-                       vmx_vrrtomrr(vcpu, VMX(vcpu, vrr[VRN2])));
+                    vrrtomrr(vcpu, VMX(vcpu, vrr[VRN2])));
        ia64_set_rr((VRN3 << VRN_SHIFT),
-                       vmx_vrrtomrr(vcpu, VMX(vcpu, vrr[VRN3])));
+                    vrrtomrr(vcpu, VMX(vcpu, vrr[VRN3])));
        ia64_set_rr((VRN5 << VRN_SHIFT),
-                       vmx_vrrtomrr(vcpu, VMX(vcpu, vrr[VRN5])));
+                    vrrtomrr(vcpu, VMX(vcpu, vrr[VRN5])));
        ia64_set_rr((VRN6 << VRN_SHIFT),
-                       vmx_vrrtomrr(vcpu, VMX(vcpu, vrr[VRN6])));
-       vmx_switch_rr7(vmx_vrrtomrr(vcpu,VMX(vcpu, vrr[VRN7])),
+                    vrrtomrr(vcpu, VMX(vcpu, vrr[VRN6])));
+       vmx_switch_rr7(vrrtomrr(vcpu,VMX(vcpu, vrr[VRN7])),
                        (void *)vcpu->domain->shared_info,
                        (void *)vcpu->arch.privregs,
-                       (void *)vcpu->arch.vtlb->vhpt->hash, pal_vaddr );
+                       (void *)vcpu->arch.vhpt.hash, pal_vaddr );
        ia64_set_pta(vcpu->arch.arch_vmx.mpta);
 
        ia64_srlz_d();
@@ -262,10 +262,10 @@ switch_to_virtual_rid(VCPU *vcpu)
     psr=ia64_clear_ic();
 
     vcpu_get_rr(vcpu,VRN0<<VRN_SHIFT,&mrr.rrval);
-    ia64_set_rr(VRN0<<VRN_SHIFT, vmx_vrrtomrr(vcpu, mrr.rrval));
+    ia64_set_rr(VRN0<<VRN_SHIFT, vrrtomrr(vcpu, mrr.rrval));
     ia64_srlz_d();
     vcpu_get_rr(vcpu,VRN4<<VRN_SHIFT,&mrr.rrval);
-    ia64_set_rr(VRN4<<VRN_SHIFT, vmx_vrrtomrr(vcpu, mrr.rrval));
+    ia64_set_rr(VRN4<<VRN_SHIFT, vrrtomrr(vcpu, mrr.rrval));
     ia64_srlz_d();
     ia64_set_psr(psr);
     ia64_srlz_i();
diff -r 2d2ef3f4c747 -r 7c7bcf173f8b xen/arch/ia64/vmx/vmx_process.c
--- a/xen/arch/ia64/vmx/vmx_process.c   Tue Apr 25 17:05:16 2006 -0600
+++ b/xen/arch/ia64/vmx/vmx_process.c   Tue Apr 25 20:53:38 2006 -0600
@@ -305,10 +305,8 @@ vmx_hpw_miss(u64 vadr , u64 vec, REGS* r
     u64 vhpt_adr, gppa;
     ISR misr;
 //    REGS *regs;
-    thash_cb_t *vtlb;
     thash_data_t *data;
     VCPU *v = current;
-    vtlb=vmx_vcpu_get_vtlb(v);
 #ifdef  VTLB_DEBUG
-    check_vtlb_sanity(vtlb);
-    dump_vtlb(vtlb);
+    check_vtlb_sanity(&v->arch.vtlb);
+    dump_vtlb(&v->arch.vtlb);
@@ -344,7 +342,7 @@ vmx_hpw_miss(u64 vadr , u64 vec, REGS* r
 
 //    prepare_if_physical_mode(v);
 
-    if((data=vtlb_lookup(vtlb, vadr,type))!=0){
+    if((data=vtlb_lookup(v, vadr,type))!=0){
 //     gppa = (vadr&((1UL<<data->ps)-1))+(data->ppn>>(data->ps-12)<<data->ps);
//        if(v->domain!=dom0&&type==DSIDE_TLB && __gpfn_is_io(v->domain,gppa>>PAGE_SHIFT)){
         if(v->domain!=dom0 && data->io && type==DSIDE_TLB ){
@@ -362,7 +360,7 @@ vmx_hpw_miss(u64 vadr , u64 vec, REGS* r
         }
         else{
  */
-            thash_vhpt_insert(vtlb->vhpt,data->page_flags, data->itir ,vadr);
+            thash_vhpt_insert(&v->arch.vhpt,data->page_flags, data->itir ,vadr);
 //        }
 //         }
     }else if(type == DSIDE_TLB){
@@ -383,7 +381,7 @@ vmx_hpw_miss(u64 vadr , u64 vec, REGS* r
             }
         } else{
             vmx_vcpu_thash(v, vadr, &vhpt_adr);
+            if(vhpt_lookup(vhpt_adr) ||  vtlb_lookup(v, vhpt_adr, DSIDE_TLB)){
+            if(vhpt_lookup(vhpt_adr) ||  vtlb_lookup(v, vhpt_adr, DSIDE_TLB)){
                 if(vpsr.ic){
                     vcpu_set_isr(v, misr.val);
                     dtlb_fault(v, vadr);
@@ -425,7 +423,7 @@ vmx_hpw_miss(u64 vadr , u64 vec, REGS* r
             return IA64_FAULT;
         } else{
             vmx_vcpu_thash(v, vadr, &vhpt_adr);
+            if(vhpt_lookup(vhpt_adr) || vtlb_lookup(v, vhpt_adr, DSIDE_TLB)){
+            if(vhpt_lookup(vhpt_adr) || vtlb_lookup(v, vhpt_adr, DSIDE_TLB)){
                 if(!vpsr.ic){
                     misr.ni=1;
                 }
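
With both control blocks reachable from the vcpu, the miss handler no
longer threads a thash_cb_t through its calls.  The resulting flow of
vmx_hpw_miss() reads roughly as follows (a condensed paraphrase of
the code above, not the literal source):

    data = vtlb_lookup(v, vadr, type);   /* guest TRs, then vTLB hash */
    if (data != NULL) {
        /* Hit: I/O pages go to instruction emulation; anything else
         * refills the machine VHPT so hardware can walk it next time. */
        thash_vhpt_insert(&v->arch.vhpt, data->page_flags,
                          data->itir, vadr);
    } else {
        /* Miss: reflect a TLB or VHPT fault to the guest, depending
         * on whether the guest's own VHPT covers vadr. */
    }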
diff -r 2d2ef3f4c747 -r 7c7bcf173f8b xen/arch/ia64/vmx/vmx_vcpu.c
--- a/xen/arch/ia64/vmx/vmx_vcpu.c      Tue Apr 25 17:05:16 2006 -0600
+++ b/xen/arch/ia64/vmx/vmx_vcpu.c      Tue Apr 25 20:53:38 2006 -0600
@@ -190,13 +190,6 @@ IA64FAULT vmx_vcpu_cover(VCPU *vcpu)
 }
 
 
-thash_cb_t *
-vmx_vcpu_get_vtlb(VCPU *vcpu)
-{
-    return vcpu->arch.vtlb;
-}
-
-
 struct virtual_platform_def *
 vmx_vcpu_get_plat(VCPU *vcpu)
 {
@@ -208,7 +201,6 @@ IA64FAULT vmx_vcpu_set_rr(VCPU *vcpu, UI
 IA64FAULT vmx_vcpu_set_rr(VCPU *vcpu, UINT64 reg, UINT64 val)
 {
     ia64_rr oldrr,newrr;
-    thash_cb_t *hcb;
     extern void * pal_vaddr;
 
     vcpu_get_rr(vcpu, reg, &oldrr.rrval);
@@ -216,18 +208,17 @@ IA64FAULT vmx_vcpu_set_rr(VCPU *vcpu, UI
     if (newrr.rid >= (1 << vcpu->domain->arch.rid_bits))
         panic_domain (NULL, "use of invalid rid %lx\n", newrr.rid);
     if(oldrr.ps!=newrr.ps){
-        hcb = vmx_vcpu_get_vtlb(vcpu);
-        thash_purge_all(hcb);
+        thash_purge_all(vcpu);
     }
     VMX(vcpu,vrr[reg>>61]) = val;
     switch((u64)(reg>>61)) {
     case VRN7:
-        vmx_switch_rr7(vmx_vrrtomrr(vcpu,val),vcpu->domain->shared_info,
+        vmx_switch_rr7(vrrtomrr(vcpu,val),vcpu->domain->shared_info,
         (void *)vcpu->arch.privregs,
-        (void *)vcpu->arch.vtlb->vhpt->hash, pal_vaddr );
+        (void *)vcpu->arch.vhpt.hash, pal_vaddr );
        break;
     default:
-        ia64_set_rr(reg,vmx_vrrtomrr(vcpu,val));
+        ia64_set_rr(reg,vrrtomrr(vcpu,val));
         break;
     }
 
diff -r 2d2ef3f4c747 -r 7c7bcf173f8b xen/arch/ia64/vmx/vtlb.c
--- a/xen/arch/ia64/vmx/vtlb.c  Tue Apr 25 17:05:16 2006 -0600
+++ b/xen/arch/ia64/vmx/vtlb.c  Tue Apr 25 20:53:38 2006 -0600
@@ -36,7 +36,7 @@ thash_data_t *__alloc_chain(thash_cb_t *
 
 static void cch_mem_init(thash_cb_t *hcb)
 {
-    thash_cch_mem_t *p, *q;
+    thash_data_t *p, *q;
 
     hcb->cch_freelist = p = hcb->cch_buf;
 
@@ -49,11 +49,11 @@ static void cch_mem_init(thash_cb_t *hcb
 
 static thash_data_t *cch_alloc(thash_cb_t *hcb)
 {
-    thash_cch_mem_t *p;
+    thash_data_t *p;
 
     if ( (p = hcb->cch_freelist) != NULL ) {
         hcb->cch_freelist = p->next;
-        return (thash_data_t *)p;
+        return p;
     }else{
         return NULL;
     }
@@ -61,10 +61,8 @@ static thash_data_t *cch_alloc(thash_cb_
 
 static void cch_free(thash_cb_t *hcb, thash_data_t *cch)
 {
-    thash_cch_mem_t *p = (thash_cch_mem_t*)cch;
-
-    p->next = hcb->cch_freelist;
-    hcb->cch_freelist = p;
+    cch->next = hcb->cch_freelist;
+    hcb->cch_freelist = cch;
 }
 
 /*
@@ -181,15 +179,16 @@ int __tlb_to_vhpt(thash_cb_t *hcb, thash
 
 static void thash_remove_cch(thash_cb_t *hcb, thash_data_t *hash)
 {
-    thash_data_t *prev, *next;
-    prev = hash; next= hash->next;
-    while(next){
-       prev=next;
-       next=prev->next;
-       cch_free(hcb, prev);
-    }
-    hash->next = NULL;
-    hash->len = 0;
+    thash_data_t *p;
+    if(hash->next){
+        p=hash->next;
+        while(p->next)
+            p=p->next;
+        p->next=hcb->cch_freelist;
+        hcb->cch_freelist=hash->next;
+        hash->next=0;
+        hash->len=0;
+    }
 }
 
 /*  vhpt only has entries with PAGE_SIZE page size */
@@ -199,8 +198,6 @@ void thash_vhpt_insert(thash_cb_t *hcb, 
     thash_data_t   vhpt_entry, *hash_table, *cch;
     vhpt_entry.page_flags = pte & ~PAGE_FLAGS_RV_MASK;
     vhpt_entry.itir=itir;
-
-//    ia64_rr vrr;
 
     if ( !__tlb_to_vhpt(hcb, &vhpt_entry, ifa) ) {
         return;
@@ -469,10 +466,11 @@ int vtr_find_overlap(VCPU *vcpu, u64 va,
 /*
  * Purge entries in VTLB and VHPT
  */
-void thash_purge_entries(thash_cb_t *hcb, u64 va, u64 ps)
-{
-    vtlb_purge(hcb, va, ps);
-    vhpt_purge(hcb->vhpt, va, ps);
+void thash_purge_entries(VCPU *v, u64 va, u64 ps)
+{
+    if(vcpu_quick_region_check(v->arch.tc_regions,va))
+        vtlb_purge(&v->arch.vtlb, va, ps);
+    vhpt_purge(&v->arch.vhpt, va, ps);
 }
 
 
@@ -480,18 +478,21 @@ void thash_purge_entries(thash_cb_t *hcb
  * Purge overlap TCs and then insert the new entry to emulate itc ops.
  *    Notes: Only TC entry can purge and insert.
  */
-void thash_purge_and_insert(thash_cb_t *hcb, u64 pte, u64 itir, u64 ifa)
+void thash_purge_and_insert(VCPU *v, u64 pte, u64 itir, u64 ifa)
 {
     u64 ps, va;
     ps = itir_ps(itir);
     va = PAGEALIGN(ifa,ps);
-    vtlb_purge(hcb, va, ps);
-    vhpt_purge(hcb->vhpt, va, ps);
-    if((ps!=PAGE_SHIFT)||(pte&VTLB_PTE_IO))
-        vtlb_insert(hcb, pte, itir, va);
+    if(vcpu_quick_region_check(v->arch.tc_regions,va))
+        vtlb_purge(&v->arch.vtlb, va, ps);
+    vhpt_purge(&v->arch.vhpt, va, ps);
+    if((ps!=PAGE_SHIFT)||(pte&VTLB_PTE_IO)){
+        vtlb_insert(&v->arch.vtlb, pte, itir, va);
+       vcpu_quick_region_set(PSCBX(v,tc_regions),va);
+    }  
     if(!(pte&VTLB_PTE_IO)){
         va = PAGEALIGN(ifa,PAGE_SHIFT);
-        thash_vhpt_insert(hcb->vhpt, pte, itir, va);
+        thash_vhpt_insert(&v->arch.vhpt, pte, itir, va);
     }
 }
 
@@ -503,13 +504,14 @@ void thash_purge_and_insert(thash_cb_t *
  */
 
 // TODO: add sections.
-void thash_purge_all(thash_cb_t *hcb)
+void thash_purge_all(VCPU *v)
 {
     thash_data_t    *hash_table;
     /* thash_data_t    *entry; */
-    thash_cb_t  *vhpt;
+    thash_cb_t  *hcb,*vhpt;
     /* u64 i, start, end; */
-
+    hcb =&v->arch.vtlb;
+    vhpt =&v->arch.vhpt;
 #ifdef  VTLB_DEBUG
        extern u64  sanity_check;
     static u64 statistics_before_purge_all=0;
@@ -526,7 +528,6 @@ void thash_purge_all(thash_cb_t *hcb)
     }
     cch_mem_init (hcb);
 
-    vhpt = hcb->vhpt;
     hash_table = (thash_data_t*)((u64)vhpt->hash + vhpt->hash_sz);
     for (--hash_table;(u64)hash_table >= (u64)vhpt->hash;hash_table--) {
         INVALIDATE_VHPT_HEADER(hash_table);
@@ -544,18 +545,22 @@ void thash_purge_all(thash_cb_t *hcb)
  *  in: TLB format for both VHPT & TLB.
  */
 
-thash_data_t *vtlb_lookup(thash_cb_t *hcb, u64 va,int is_data)
+thash_data_t *vtlb_lookup(VCPU *v, u64 va,int is_data)
 {
     thash_data_t    *hash_table, *cch;
     u64     tag;
     ia64_rr vrr;
-
+    thash_cb_t * hcb= &v->arch.vtlb;
     ASSERT ( hcb->ht == THASH_TLB );
 
-    cch = __vtr_lookup(hcb->vcpu, va, is_data);;
+    cch = __vtr_lookup(v, va, is_data);
     if ( cch ) return cch;
 
-    vcpu_get_rr(hcb->vcpu,va,&vrr.rrval);
+    if(vcpu_quick_region_check(v->arch.tc_regions,va)==0)
+        return NULL;
+    
+
+    vcpu_get_rr(v,va,&vrr.rrval);
     hash_table = vsa_thash( hcb->pta, va, vrr.rrval, &tag);
 
     if ( INVALID_ENTRY(hcb, hash_table ) )
@@ -578,7 +583,6 @@ void thash_init(thash_cb_t *hcb, u64 sz)
     thash_data_t    *hash_table;
 
     cch_mem_init (hcb);
-    hcb->magic = THASH_CB_MAGIC;
     hcb->pta.val = (unsigned long)hcb->hash;
     hcb->pta.vf = 1;
     hcb->pta.ve = 1;
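
The vcpu_quick_region_check()/vcpu_quick_region_set() helpers used in
thash_purge_entries(), thash_purge_and_insert() and vtlb_lookup()
treat bits 63:61 of a virtual address as its region number and keep
one bit per region.  Their definitions live in the common ia64 vcpu
header; quoted from memory (check the tree for the exact form), they
are roughly:

    #define vcpu_quick_region_check(_tr_regions,_ifa)           \
        ((_tr_regions) & (1 << ((unsigned long)(_ifa) >> 61)))
    #define vcpu_quick_region_set(_tr_regions,_ifa)             \
        do { (_tr_regions) |= (1 << ((unsigned long)(_ifa) >> 61)); } while (0)

Because vtlb_lookup() now returns NULL as soon as the bit for va's
region is clear, lookups and purges skip walking an empty vTLB hash
for regions the guest never mapped through the TC.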
diff -r 2d2ef3f4c747 -r 7c7bcf173f8b xen/include/asm-ia64/domain.h
--- a/xen/include/asm-ia64/domain.h     Tue Apr 25 17:05:16 2006 -0600
+++ b/xen/include/asm-ia64/domain.h     Tue Apr 25 20:53:38 2006 -0600
@@ -74,7 +74,8 @@ struct arch_vcpu {
        unsigned long dtlb_pte;
        unsigned long irr[4];
        unsigned long insvc[4];
-    unsigned long iva;
+       unsigned long tc_regions;
+       unsigned long iva;
        unsigned long dcr;
        unsigned long itc;
        unsigned long domain_itm;
@@ -91,7 +92,8 @@ struct arch_vcpu {
     int ending_rid;            /* one beyond highest RID assigned to domain */
     struct thread_struct _thread;      // this must be last
 
-    thash_cb_t *vtlb;
+    thash_cb_t vtlb;
+    thash_cb_t vhpt;
     char irq_new_pending;
     char irq_new_condition;    // vpsr.i/vtpr change, check for pending VHPI
     char hypercall_continuation;
diff -r 2d2ef3f4c747 -r 7c7bcf173f8b xen/include/asm-ia64/vmmu.h
--- a/xen/include/asm-ia64/vmmu.h       Tue Apr 25 17:05:16 2006 -0600
+++ b/xen/include/asm-ia64/vmmu.h       Tue Apr 25 20:53:38 2006 -0600
@@ -30,7 +30,8 @@
 #define         VCPU_VHPT_SHIFT          (24)    // 16M for VTLB
 #define         VCPU_VHPT_SIZE           (1UL<<VCPU_VHPT_SHIFT)
 #define         VCPU_VHPT_ORDER          (VCPU_VHPT_SHIFT - PAGE_SHIFT)
-
+#define                VTLB(v,_x)              (v->arch.vtlb._x)
+#define                VHPT(v,_x)              (v->arch.vhpt._x)
 #ifndef __ASSEMBLY__
 
 #include <xen/config.h>
@@ -180,12 +181,6 @@ typedef enum {
 } THASH_TYPE;
 
 struct thash_cb;
-typedef union thash_cch_mem {
-        thash_data_t    data;
-        union thash_cch_mem *next;
-} thash_cch_mem_t;
-
-
 /*
  * Use to calculate the HASH index of thash_data_t.
  */
@@ -230,11 +225,11 @@ typedef struct thash_internal {
         u64     _eva;
 } thash_internal_t;
  */
-#define  THASH_CB_MAGIC         0x55aa00aa55aa55aaUL
+//#define  THASH_CB_MAGIC         0x55aa00aa55aa55aaUL
 typedef struct thash_cb {
         /* THASH base information */
-        THASH_TYPE      ht;     // For TLB or VHPT
-        u64             magic;
+//        THASH_TYPE      ht;     // For TLB or VHPT
+//        u64             magic;
         thash_data_t    *hash; // hash table pointer, aligned at thash_sz.
         u64     hash_sz;        // size of above data.
         void    *cch_buf;       // base address of collision chain.
@@ -242,10 +237,10 @@ typedef struct thash_cb {
 //        THASH_FN        *hash_func;
 //        GET_RR_FN       *get_rr_fn;
 //        RECYCLE_FN      *recycle_notifier;
-        thash_cch_mem_t *cch_freelist;
-        struct vcpu *vcpu;
+        thash_data_t *cch_freelist;
+//        struct vcpu *vcpu;
         PTA     pta;
-        struct thash_cb *vhpt;
+//        struct thash_cb *vhpt;
         /* VTLB/VHPT common information */
 //        FIND_OVERLAP_FN *find_overlap;
 //        FIND_NEXT_OVL_FN *next_overlap;
@@ -347,21 +342,21 @@ extern thash_data_t *thash_find_next_ove
  *    NOTES:
  *
  */
-extern void thash_purge_entries(thash_cb_t *hcb, u64 va, u64 ps);
-extern void thash_purge_and_insert(thash_cb_t *hcb, u64 pte, u64 itir, u64 ifa);
+extern void thash_purge_entries(struct vcpu *v, u64 va, u64 ps);
+extern void thash_purge_and_insert(struct vcpu *v, u64 pte, u64 itir, u64 ifa);
 
 /*
  * Purge all TCs or VHPT entries including those in Hash table.
  *
  */
-extern void thash_purge_all(thash_cb_t *hcb);
+extern void thash_purge_all(struct vcpu *v);
 
 /*
  * Lookup the hash table and its collision chain to find an entry
  * covering this address rid:va.
  *
  */
-extern thash_data_t *vtlb_lookup(thash_cb_t *hcb,u64 va,int is_data);
+extern thash_data_t *vtlb_lookup(struct vcpu *v,u64 va,int is_data);
extern int thash_lock_tc(thash_cb_t *hcb, u64 va, u64 size, int rid, char cl, int lock);
 
 
@@ -372,7 +367,7 @@ extern void purge_machine_tc_by_domid(do
 extern void purge_machine_tc_by_domid(domid_t domid);
 extern void machine_tlb_insert(struct vcpu *d, thash_data_t *tlb);
 extern ia64_rr vmmu_get_rr(struct vcpu *vcpu, u64 va);
-extern thash_cb_t *init_domain_tlb(struct vcpu *d);
+extern void init_domain_tlb(struct vcpu *d);
 extern void free_domain_tlb(struct vcpu *v);
 extern thash_data_t * vsa_thash(PTA vpta, u64 va, u64 vrr, u64 *tag);
 extern thash_data_t * vhpt_lookup(u64 va);
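
The new VTLB()/VHPT() macros are plain field shorthands over the
embedded structures; for instance, the line from init_domain_tlb()
above

    VTLB(v,cch_buf) = (void *)((u64)vbase + VTLB(v,hash_sz));

expands to

    v->arch.vtlb.cch_buf = (void *)((u64)vbase + v->arch.vtlb.hash_sz);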
diff -r 2d2ef3f4c747 -r 7c7bcf173f8b xen/include/asm-ia64/vmx_vcpu.h
--- a/xen/include/asm-ia64/vmx_vcpu.h   Tue Apr 25 17:05:16 2006 -0600
+++ b/xen/include/asm-ia64/vmx_vcpu.h   Tue Apr 25 20:53:38 2006 -0600
@@ -64,8 +64,6 @@ extern UINT64 vmx_vcpu_sync_mpsr(UINT64 
 extern UINT64 vmx_vcpu_sync_mpsr(UINT64 mipsr, UINT64 value);
 extern void vmx_vcpu_set_psr_sync_mpsr(VCPU * vcpu, UINT64 value);
 extern IA64FAULT vmx_vcpu_cover(VCPU *vcpu);
-extern thash_cb_t *vmx_vcpu_get_vtlb(VCPU *vcpu);
-extern thash_cb_t *vmx_vcpu_get_vhpt(VCPU *vcpu);
 extern IA64FAULT vmx_vcpu_set_rr(VCPU *vcpu, UINT64 reg, UINT64 val);
 extern IA64FAULT vmx_vcpu_get_pkr(VCPU *vcpu, UINT64 reg, UINT64 *pval);
 IA64FAULT vmx_vcpu_set_pkr(VCPU *vcpu, UINT64 reg, UINT64 val);
@@ -461,7 +459,7 @@ IA64FAULT vmx_vcpu_bsw1(VCPU *vcpu)
#define redistribute_rid(rid)  (((rid) & ~0xffff) | (((rid) << 8) & 0xff00) | (((rid) >> 8) & 0xff))
 #endif
 static inline unsigned long
-vmx_vrrtomrr(VCPU *v, unsigned long val)
+vrrtomrr(VCPU *v, unsigned long val)
 {
     ia64_rr rr;
 
@@ -477,6 +475,17 @@ vmx_vrrtomrr(VCPU *v, unsigned long val)
 #endif 
 
 }
+static inline thash_cb_t *
+vmx_vcpu_get_vtlb(VCPU *vcpu)
+{
+    return &vcpu->arch.vtlb;
+}
+
+static inline thash_cb_t *
+vcpu_get_vhpt(VCPU *vcpu)
+{
+    return &vcpu->arch.vhpt;
+}
 
 #define check_work_pending(v)  \
     (event_pending((v)) || ((v)->arch.irq_new_pending))

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog