[Xen-changelog] [xen-unstable] [IA64] Handle speculative vhpt walk
# HG changeset patch
# User Alex Williamson <alex.williamson@xxxxxx>
# Date 1178833590 21600
# Node ID 31be207e005eaf178c87c047d8827998d6122282
# Parent  8745300bec4ec254b3e2426e4fb5e3f4a9e0e0bc
[IA64] Handle speculative vhpt walk

Since the processor may support speculative VHPT walk, the long-format
VHPT head entry needs to be disabled before programming it.

Signed-off-by: Anthony Xu <anthony.xu@xxxxxxxxx>
---
 xen/arch/ia64/vmx/vmx_ivt.S |   20 +++++++++++++-------
 xen/arch/ia64/vmx/vtlb.c    |   17 ++++++++---------
 xen/arch/ia64/xen/vhpt.c    |    8 +++++---
 3 files changed, 26 insertions(+), 19 deletions(-)

diff -r 8745300bec4e -r 31be207e005e xen/arch/ia64/vmx/vmx_ivt.S
--- a/xen/arch/ia64/vmx/vmx_ivt.S   Thu May 10 15:18:27 2007 -0600
+++ b/xen/arch/ia64/vmx/vmx_ivt.S   Thu May 10 15:46:30 2007 -0600
@@ -168,11 +168,11 @@ vmx_itlb_loop:
     adds r16 = VLE_TITAG_OFFSET, r17
     adds r19 = VLE_CCHAIN_OFFSET, r17
     ;;
-    ld8 r22 = [r16]
+    ld8 r24 = [r16]
     ld8 r23 = [r19]
     ;;
     lfetch [r23]
-    cmp.eq p6,p7 = r20, r22
+    cmp.eq p6,p7 = r20, r24
     ;;
 (p7)mov r17 = r23;
 (p7)br.sptk vmx_itlb_loop
@@ -180,10 +180,12 @@ vmx_itlb_loop:
     ld8 r25 = [r17]
     ld8 r27 = [r18]
     ld8 r29 = [r28]
+    dep r22 = -1,r24,63,1    //set ti=1
     ;;
     st8 [r16] = r29, VLE_ITIR_OFFSET - VLE_TITAG_OFFSET
     st8 [r28] = r22, VLE_ITIR_OFFSET - VLE_TITAG_OFFSET
     extr.u r19 = r27, 56, 4
+    mf
     ;;
     ld8 r29 = [r16]
     ld8 r22 = [r28]
@@ -191,10 +193,11 @@ vmx_itlb_loop:
     dep r25 = r19, r25, 56, 4
     ;;
     st8 [r16] = r22
-    st8 [r28] = r29
+    st8 [r28] = r29, VLE_TITAG_OFFSET - VLE_ITIR_OFFSET
     st8 [r18] = r25
     st8 [r17] = r27
     ;;
+    st8.rel [r28] = r24
     itc.i r25
     dv_serialize_data
     mov r17=cr.isr
@@ -246,11 +249,11 @@ vmx_dtlb_loop:
     adds r16 = VLE_TITAG_OFFSET, r17
     adds r19 = VLE_CCHAIN_OFFSET, r17
     ;;
-    ld8 r22 = [r16]
+    ld8 r24 = [r16]
     ld8 r23 = [r19]
     ;;
     lfetch [r23]
-    cmp.eq p6,p7 = r20, r22
+    cmp.eq p6,p7 = r20, r24
     ;;
 (p7)mov r17 = r23;
 (p7)br.sptk vmx_dtlb_loop
@@ -258,10 +261,12 @@ vmx_dtlb_loop:
     ld8 r25 = [r17]
     ld8 r27 = [r18]
     ld8 r29 = [r28]
+    dep r22 = -1,r24,63,1    //set ti=1
     ;;
     st8 [r16] = r29, VLE_ITIR_OFFSET - VLE_TITAG_OFFSET
     st8 [r28] = r22, VLE_ITIR_OFFSET - VLE_TITAG_OFFSET
     extr.u r19 = r27, 56, 4
+    mf
     ;;
     ld8 r29 = [r16]
     ld8 r22 = [r28]
@@ -269,10 +274,11 @@ vmx_dtlb_loop:
     dep r25 = r19, r25, 56, 4
     ;;
     st8 [r16] = r22
-    st8 [r28] = r29
+    st8 [r28] = r29, VLE_TITAG_OFFSET - VLE_ITIR_OFFSET
     st8 [r18] = r25
     st8 [r17] = r27
-    ;;
+    ;;
+    st8.rel [r28] = r24
     itc.d r25
     dv_serialize_data
     mov r17=cr.isr
diff -r 8745300bec4e -r 31be207e005e xen/arch/ia64/vmx/vtlb.c
--- a/xen/arch/ia64/vmx/vtlb.c      Thu May 10 15:18:27 2007 -0600
+++ b/xen/arch/ia64/vmx/vtlb.c      Thu May 10 15:46:30 2007 -0600
@@ -141,7 +141,7 @@ static void thash_recycle_cch(thash_cb_t
 
 static void vmx_vhpt_insert(thash_cb_t *hcb, u64 pte, u64 itir, u64 ifa)
 {
-    u64 tag ,len;
+    u64 tag;
     ia64_rr rr;
     thash_data_t *head, *cch;
     pte = pte & ~PAGE_FLAGS_RV_MASK;
@@ -155,14 +155,12 @@ static void vmx_vhpt_insert(thash_cb_t *
         cch = cch->next;
     }
     if (cch) {
-        if (cch == head) {
-            len = head->len;
-        } else {
+        if (cch != head) {
             local_irq_disable();
             cch->page_flags = head->page_flags;
             cch->itir = head->itir;
             cch->etag = head->etag;
-            len = head->len;
+            head->ti = 1;
             local_irq_enable();
         }
     }
@@ -175,16 +173,17 @@ static void vmx_vhpt_insert(thash_cb_t *
         }
         local_irq_disable();
         *cch = *head;
+        head->ti = 1;
         head->next = cch;
-        len = cch->len+1;
+        head->len = cch->len + 1;
         cch->len = 0;
         local_irq_enable();
     }
-
+    //here head is invalid
+    wmb();
     head->page_flags=pte;
-    head->len = len;
     head->itir = rr.ps << 2;
-    head->etag=tag;
+    *(volatile unsigned long*)&head->etag = tag;
     return;
 }
 
diff -r 8745300bec4e -r 31be207e005e xen/arch/ia64/xen/vhpt.c
--- a/xen/arch/ia64/xen/vhpt.c      Thu May 10 15:18:27 2007 -0600
+++ b/xen/arch/ia64/xen/vhpt.c      Thu May 10 15:46:30 2007 -0600
@@ -78,11 +78,13 @@ void vhpt_insert (unsigned long vadr, un
 	struct vhpt_lf_entry *vlfe = (struct vhpt_lf_entry *)ia64_thash(vadr);
 	unsigned long tag = ia64_ttag (vadr);
 
-	/* No need to first disable the entry, since VHPT is per LP
-	   and VHPT is TR mapped. */
+	/* Even though VHPT is per VCPU, still need to first disable the entry,
+	 * because the processor may support speculative VHPT walk. */
+	vlfe->ti_tag = INVALID_TI_TAG;
+	wmb();
 	vlfe->itir = logps;
 	vlfe->page_flags = pte | _PAGE_P;
-	vlfe->ti_tag = tag;
+	*(volatile unsigned long*)&vlfe->ti_tag = tag;
 }
 
 void vhpt_multiple_insert(unsigned long vaddr, unsigned long pte, unsigned long logps)
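For readers who do not follow the ia64 assembly, the ordering the patch enforces can be stated in plain C. The sketch below is not Xen code: the struct layout, the INVALID_TI_TAG value, the write_barrier() helper and the lf_entry_reprogram() function are illustrative stand-ins for Xen's struct vhpt_lf_entry, its INVALID_TI_TAG constant, wmb()/mf, and the insert paths touched above. It only shows why the tag is cleared first and written back last.

#include <stdint.h>

/* Illustrative stand-in for a long-format VHPT entry; field names mirror the
 * patch, the layout and the INVALID_TI_TAG value are assumptions, not Xen's
 * real definitions.  Setting the ti (top) bit of the tag word makes the
 * hardware walker ignore the entry. */
#define INVALID_TI_TAG (1UL << 63)

struct lf_entry {
    volatile uint64_t itir;        /* translation attributes / page size */
    volatile uint64_t page_flags;  /* pte payload */
    volatile uint64_t ti_tag;      /* tag, with the ti (invalid) bit on top */
};

/* Stand-in for Xen's wmb(); on ia64 this boils down to an "mf" instruction. */
static inline void write_barrier(void)
{
    __sync_synchronize();
}

/* Reprogram an entry that a speculative hardware VHPT walk may read at any
 * moment:
 *   1) invalidate the tag so a concurrent walk misses,
 *   2) barrier, so the invalidation is visible before the payload changes,
 *   3) rewrite the payload fields,
 *   4) publish the valid tag last.
 * The patch performs step 4 with a volatile store in C and an st8.rel release
 * store in the assembly fast path; a second barrier is shown here only to make
 * the required ordering explicit. */
static void lf_entry_reprogram(struct lf_entry *e, uint64_t pte,
                               uint64_t itir, uint64_t tag)
{
    e->ti_tag = INVALID_TI_TAG;   /* 1: disable the entry */
    write_barrier();              /* 2: order the disable before the updates */
    e->itir = itir;               /* 3: program the new translation */
    e->page_flags = pte;
    write_barrier();
    e->ti_tag = tag;              /* 4: re-enable with the new tag */
}

With the tag cleared first, the worst a concurrent speculative walk can do is miss and fall back to the miss handler; it can never install a translation assembled from a half-updated entry.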
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog