[Xen-changelog] [xen-unstable] [IA64] VTLB optimization: Reuse invalid entry
# HG changeset patch
# User awilliam@xxxxxxxxxxx
# Node ID 261b95f114a2ddc50aec738b6284f1de89ef5262
# Parent  3e54734e55f39419678afd1ce1a9a96669fa69ef
[IA64] VTLB optimization: Reuse invalid entry

When inserting an entry into the vtlb or vhpt, reuse an invalid entry.

Signed-off-by: Isaku Yamahata <yamahata@xxxxxxxxxxxxx>
Signed-off-by: Anthony Xu <anthony.xu@xxxxxxxxx>
---
 xen/arch/ia64/vmx/vtlb.c |   48 +++++++++++++++++++++--------------------------
 1 files changed, 22 insertions(+), 26 deletions(-)

diff -r 3e54734e55f3 -r 261b95f114a2 xen/arch/ia64/vmx/vtlb.c
--- a/xen/arch/ia64/vmx/vtlb.c	Wed Aug 23 13:26:46 2006 -0600
+++ b/xen/arch/ia64/vmx/vtlb.c	Thu Aug 24 11:32:55 2006 -0600
@@ -148,13 +148,17 @@ static void vmx_vhpt_insert(thash_cb_t *
     rr.rrval = ia64_get_rr(ifa);
     head = (thash_data_t *)ia64_thash(ifa);
     tag = ia64_ttag(ifa);
-    if( INVALID_VHPT(head) ) {
-        len = head->len;
-        head->page_flags = pte;
-        head->len = len;
-        head->itir = rr.ps << 2;
-        head->etag = tag;
-        return;
+    cch = head;
+    while (cch) {
+        if (INVALID_VHPT(cch)) {
+            len = cch->len;
+            cch->page_flags = pte;
+            cch->len = len;
+            cch->itir = rr.ps << 2;
+            cch->etag = tag;
+            return;
+        }
+        cch = cch->next;
     }
 
     if(head->len>=MAX_CCN_DEPTH){
@@ -358,24 +362,20 @@ void vtlb_insert(VCPU *v, u64 pte, u64 i
     u64 tag, len;
     thash_cb_t *hcb = &v->arch.vtlb;
     vcpu_get_rr(v, va, &vrr.rrval);
-#ifdef VTLB_DEBUG
-    if (vrr.ps != itir_ps(itir)) {
-//        machine_tlb_insert(hcb->vcpu, entry);
-        panic_domain(NULL, "not preferred ps with va: 0x%lx vrr.ps=%d ps=%ld\n",
-                     va, vrr.ps, itir_ps(itir));
-        return;
-    }
-#endif
     vrr.ps = itir_ps(itir);
     VMX(v, psbits[va >> 61]) |= (1UL << vrr.ps);
     hash_table = vsa_thash(hcb->pta, va, vrr.rrval, &tag);
-    if( INVALID_TLB(hash_table) ) {
-        len = hash_table->len;
-        hash_table->page_flags = pte;
-        hash_table->len = len;
-        hash_table->itir=itir;
-        hash_table->etag=tag;
-        return;
+    cch = hash_table;
+    while (cch) {
+        if (INVALID_TLB(cch)) {
+            len = cch->len;
+            cch->page_flags = pte;
+            cch->len = len;
+            cch->itir=itir;
+            cch->etag=tag;
+            return;
+        }
+        cch = cch->next;
     }
     if (hash_table->len>=MAX_CCN_DEPTH){
         thash_recycle_cch(hcb, hash_table);
@@ -469,10 +469,6 @@ void thash_purge_and_insert(VCPU *v, u64
     ps = itir_ps(itir);
     vcpu_get_rr(current, ifa, &vrr.rrval);
     mrr.rrval = ia64_get_rr(ifa);
-//    if (vrr.ps != itir_ps(itir)) {
-//        printf("not preferred ps with va: 0x%lx vrr.ps=%d ps=%ld\n",
-//               ifa, vrr.ps, itir_ps(itir));
-//    }
     if(VMX_DOMAIN(v)){
         /* Ensure WB attribute if pte is related to a normal mem page,
          * which is required by vga acceleration since qemu maps shared
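For readers following the change: the patch replaces a head-only check with a walk of the whole collision chain, reusing the first invalid slot before falling back to extending or recycling the chain. Below is a minimal stand-alone sketch of that pattern, not the actual Xen code; the struct and helper names (entry_t, chain_insert_reuse) and the "pte == 0 means invalid" convention are simplified stand-ins for thash_data_t, INVALID_VHPT/INVALID_TLB and the real insert paths.

#include <stdio.h>
#include <stddef.h>

/* Simplified stand-in for thash_data_t: a validity marker, a tag and the
 * collision-chain link. */
typedef struct entry {
    unsigned long pte;      /* 0 here means "invalid, reusable"        */
    unsigned long tag;      /* identifies which mapping the slot holds */
    struct entry *next;     /* collision chain                         */
} entry_t;

/* Walk the whole collision chain and reuse the first invalid slot, instead
 * of only looking at the head bucket (the pre-patch behaviour).  Returns
 * the slot that was filled, or NULL when every slot is valid and the caller
 * must extend or recycle the chain, as the real code still does after its
 * while loop. */
static entry_t *chain_insert_reuse(entry_t *head, unsigned long pte,
                                   unsigned long tag)
{
    for (entry_t *cch = head; cch != NULL; cch = cch->next) {
        if (cch->pte == 0) {        /* invalid entry found: reuse it */
            cch->pte = pte;
            cch->tag = tag;
            return cch;
        }
    }
    return NULL;                    /* chain is full */
}

int main(void)
{
    /* Three-slot chain with an invalid entry in the middle. */
    entry_t c = { 0x3000, 3, NULL };
    entry_t b = { 0,      0, &c   };    /* invalid slot */
    entry_t a = { 0x1000, 1, &b   };

    entry_t *slot = chain_insert_reuse(&a, 0x2000, 2);
    printf("reused slot: tag=%lu pte=0x%lx\n", slot->tag, slot->pte);
    return 0;
}

The trade-off is straightforward: stale entries anywhere in the chain get reclaimed, so the chain grows and gets recycled less often, at the cost of an O(chain length) scan on each insert, which stays cheap because chain depth is capped (MAX_CCN_DEPTH in the real code).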