
[Xen-changelog] [xen-unstable] [IA64] Add support for hvm live migration



# HG changeset patch
# User Alex Williamson <alex.williamson@xxxxxx>
# Date 1203015055 25200
# Node ID 8e89261a8308522699fe7ba0675c7777d13cb51c
# Parent  9e9ba5185ef122775a13b080252320d10e0c8610
[IA64] Add support for hvm live migration

This is a naive implementation of log-dirty mode for HVM.
(I gave up on writing the dirty-bit fault handler in assembler;
the fault is dispatched to a C handler instead.)

An HVM domain with PV drivers can't be migrated yet.

Signed-off-by: Kouya Shimura <kouya@xxxxxxxxxxxxxx>
---
 xen/arch/ia64/vmx/vmx_fault.c |   45 ++++++++++++++++++++++++++++++++++++++++++
 xen/arch/ia64/vmx/vmx_ivt.S   |   38 +++++++++++++++++++++++++++++++++--
 xen/arch/ia64/vmx/vtlb.c      |   18 ++++++++++++++--
 3 files changed, 97 insertions(+), 4 deletions(-)
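
For context: in log-dirty mode the hypervisor records every guest page that
is written during a copy round, so the next round of the live migration
resends only the pages that changed underneath it. Below is a minimal,
self-contained sketch of that bookkeeping; all names are invented for
illustration, and only the bitmap-and-resend idea mirrors the patch.

    /* log_dirty_sketch.c -- hypothetical illustration, not Xen code. */
    #include <stdio.h>

    #define NPAGES 16
    static unsigned char dirty_bitmap[(NPAGES + 7) / 8];

    /* A write fault on a tracked page lands here: set the page's bit. */
    static void mark_dirty(unsigned long gpfn)
    {
        if (gpfn < NPAGES)              /* bounds check, as in the patch */
            dirty_bitmap[gpfn / 8] |= 1u << (gpfn % 8);
    }

    /* The migration loop consumes and clears the bits each copy round. */
    static int test_and_clear_dirty(unsigned long gpfn)
    {
        unsigned char bit = 1u << (gpfn % 8);
        int was_dirty = dirty_bitmap[gpfn / 8] & bit;
        dirty_bitmap[gpfn / 8] &= ~bit;
        return !!was_dirty;
    }

    int main(void)
    {
        mark_dirty(2);                  /* guest stores fault and are logged */
        mark_dirty(5);

        for (unsigned long gpfn = 0; gpfn < NPAGES; gpfn++)
            if (test_and_clear_dirty(gpfn))
                printf("round 1: resend page %lu\n", gpfn);
        return 0;
    }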

diff -r 9e9ba5185ef1 -r 8e89261a8308 xen/arch/ia64/vmx/vmx_fault.c
--- a/xen/arch/ia64/vmx/vmx_fault.c     Thu Feb 14 09:34:27 2008 -0700
+++ b/xen/arch/ia64/vmx/vmx_fault.c     Thu Feb 14 11:50:55 2008 -0700
@@ -52,6 +52,7 @@
 #include <asm/vmx_phy_mode.h>
 #include <xen/mm.h>
 #include <asm/vmx_pal.h>
+#include <asm/shadow.h>
 /* reset all PSR field to 0, except up,mfl,mfh,pk,dt,rt,mc,it */
 #define INITIAL_PSR_VALUE_AT_INTERRUPTION 0x0000001808028034
 
@@ -520,3 +521,47 @@ try_again:
     itlb_fault(v, vadr);
     return IA64_FAULT;
 }
+
+void
+vmx_ia64_shadow_fault(u64 ifa, u64 isr, u64 mpa, REGS *regs)
+{
+    struct vcpu *v = current;
+    struct domain *d = v->domain;
+    u64 gpfn, pte;
+    thash_data_t *data;
+
+    if (!shadow_mode_enabled(d))
+        goto inject_dirty_bit;
+
+    gpfn = get_gpfn_from_mfn(mpa >> PAGE_SHIFT);
+    data = vhpt_lookup(ifa);
+    if (data) {
+        pte = data->page_flags;
+        // BUG_ON((pte ^ mpa) & (_PAGE_PPN_MASK & PAGE_MASK));
+        if (!(pte & _PAGE_VIRT_D))
+            goto inject_dirty_bit;
+        data->page_flags = pte | _PAGE_D;
+    } else {
+        data = vtlb_lookup(v, ifa, DSIDE_TLB);
+        if (data) {
+            if (!(data->page_flags & _PAGE_VIRT_D))
+                goto inject_dirty_bit;
+        }
+        pte = 0;
+    }
+
+    /* Set the dirty bit in the bitmap.  */
+    shadow_mark_page_dirty(d, gpfn);
+
+    /* Retry */
+    atomic64_inc(&d->arch.shadow_fault_count);
+    ia64_ptcl(ifa, PAGE_SHIFT << 2);
+    return;
+
+inject_dirty_bit:
+    /* Reflect. no need to purge.  */
+    VCPU(v, isr) = isr;
+    set_ifa_itir_iha (v, ifa, 1, 1, 1);
+    inject_guest_interruption(v, IA64_DIRTY_BIT_VECTOR);
+    return;
+}
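
The handler above consults the VHPT first and falls back to the VTLB. In both
paths the fault is reflected into the guest unless the software _PAGE_VIRT_D
bit shows that the guest's own PTE really is writable-dirty; only then does
it log the page in the bitmap, set the machine dirty bit, purge the stale
translation, and retry the store. A compilable toy model of just that
decision (bit positions and type names are stand-ins, not Xen's):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PTE_D      (UINT64_C(1) << 6)   /* stand-in hardware dirty bit */
    #define PTE_VIRT_D (UINT64_C(1) << 62)  /* stand-in software "guest D" bit */

    enum fault_action { LOG_AND_RETRY, REFLECT_TO_GUEST };

    static enum fault_action shadow_fault(bool log_dirty_enabled, uint64_t pte)
    {
        if (!log_dirty_enabled)
            return REFLECT_TO_GUEST;  /* no tracking: fault belongs to guest */
        if (!(pte & PTE_VIRT_D))
            return REFLECT_TO_GUEST;  /* guest's own PTE really is clean */
        return LOG_AND_RETRY;         /* mark bitmap, set D, purge, retry */
    }

    int main(void)
    {
        printf("%d\n", shadow_fault(true, PTE_VIRT_D)); /* 0 = log and retry */
        printf("%d\n", shadow_fault(true, 0));          /* 1 = reflect */
        return 0;
    }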
diff -r 9e9ba5185ef1 -r 8e89261a8308 xen/arch/ia64/vmx/vmx_ivt.S
--- a/xen/arch/ia64/vmx/vmx_ivt.S       Thu Feb 14 09:34:27 2008 -0700
+++ b/xen/arch/ia64/vmx/vmx_ivt.S       Thu Feb 14 11:50:55 2008 -0700
@@ -433,8 +433,16 @@ END(vmx_dkey_miss)
 
 /////////////////////////////////////////////////////////////////////////////////////////
 // 0x2000 Entry 8 (size 64 bundles) Dirty-bit (54)
 ENTRY(vmx_dirty_bit)
-    VMX_DBG_FAULT(8)
-    VMX_REFLECT(8)
+    mov r29=cr.ipsr
+    mov r31=pr
+    ;;
+    mov r19=cr.ifa
+    tbit.z p6,p0=r29,IA64_PSR_VM_BIT
+(p6)br.spnt.many vmx_fault_8
+    ;;
+    tpa r19=r19
+    br.sptk vmx_dispatch_shadow_fault
+    VMX_FAULT(8)
 END(vmx_dirty_bit)
 
     .org vmx_ia64_ivt+0x2400
@@ -1332,6 +1340,30 @@ ENTRY(vmx_dispatch_interrupt)
     br.call.sptk.many b6=ia64_handle_irq
 END(vmx_dispatch_interrupt)
 
+
+ENTRY(vmx_dispatch_shadow_fault)
+    VMX_SAVE_MIN_WITH_COVER_R19
+    alloc r14=ar.pfs,0,0,4,0
+    mov out0=cr.ifa
+    mov out1=cr.isr
+    mov out2=r15
+    adds r3=8,r2                // set up second base pointer
+    ;;
+    ssm psr.ic
+    ;;
+    srlz.i                  // guarantee that interruption collection is on
+    ;;
+    (p15) ssm psr.i               // restore psr.i
+    movl r14=ia64_leave_hypervisor
+    ;;
+    VMX_SAVE_REST
+    mov rp=r14
+    ;;
+    P6_BR_CALL_PANIC(.Lvmx_dispatch_shadow_fault_string)
+    adds out3=16,r12
+    br.call.sptk.many b6=vmx_ia64_shadow_fault
+END(vmx_dispatch_shadow_fault)
+
 .Lvmx_dispatch_reflection_string:
     .asciz "vmx_dispatch_reflection\n"
 .Lvmx_dispatch_virtualization_fault_string:
@@ -1340,3 +1372,5 @@ END(vmx_dispatch_interrupt)
     .asciz "vmx_dispatch_vexirq\n"
 .Lvmx_dispatch_tlb_miss_string:
     .asciz "vmx_dispatch_tlb_miss\n"
+.Lvmx_dispatch_shadow_fault_string:
+    .asciz "vmx_dispatch_shadow_fault\n"
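
The rewritten dirty-bit IVT entry checks PSR.vm, translates the faulting
address with tpa, and branches to the new dispatcher, which marshals the
arguments for the C handler. Reading the assembly above, the outgoing
registers appear to line up with the handler's parameters as follows (the
typedefs below are stubs so the declaration stands alone; REGS is Xen's
saved-register frame):

    typedef unsigned long u64;
    typedef struct pt_regs REGS;

    void vmx_ia64_shadow_fault(
        u64 ifa,     /* out0 = cr.ifa: faulting virtual address */
        u64 isr,     /* out1 = cr.isr: interruption status */
        u64 mpa,     /* out2 = r15: the tpa result computed in r19 by the
                        IVT entry, carried through the _R19 save macro */
        REGS *regs); /* out3 = r12+16: pointer to the saved frame */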
diff -r 9e9ba5185ef1 -r 8e89261a8308 xen/arch/ia64/vmx/vtlb.c
--- a/xen/arch/ia64/vmx/vtlb.c  Thu Feb 14 09:34:27 2008 -0700
+++ b/xen/arch/ia64/vmx/vtlb.c  Thu Feb 14 11:50:55 2008 -0700
@@ -22,6 +22,7 @@
 
 #include <asm/vmx_vcpu.h>
 #include <asm/vmx_phy_mode.h>
+#include <asm/shadow.h>
 
 static thash_data_t *__alloc_chain(thash_cb_t *);
 
@@ -132,7 +133,7 @@ static void vmx_vhpt_insert(thash_cb_t *
     ia64_rr rr;
     thash_data_t *head, *cch;
 
-    pte = pte & ~PAGE_FLAGS_RV_MASK;
+    pte &= ((~PAGE_FLAGS_RV_MASK)|_PAGE_VIRT_D);
     rr.rrval = ia64_get_rr(ifa);
     head = (thash_data_t *)ia64_thash(ifa);
     tag = ia64_ttag(ifa);
@@ -514,13 +515,14 @@ u64 translate_phy_pte(VCPU *v, u64 *pte,
     u64 ps, ps_mask, paddr, maddr;
 //    ia64_rr rr;
     union pte_flags phy_pte;
+    struct domain *d = v->domain;
 
     ps = itir_ps(itir);
     ps_mask = ~((1UL << ps) - 1);
     phy_pte.val = *pte;
     paddr = *pte;
     paddr = ((paddr & _PAGE_PPN_MASK) & ps_mask) | (va & ~ps_mask);
-    maddr = lookup_domain_mpa(v->domain, paddr, NULL);
+    maddr = lookup_domain_mpa(d, paddr, NULL);
     if (maddr & GPFN_IO_MASK) {
         *pte |= VTLB_PTE_IO;
         return -1;
@@ -536,6 +538,18 @@ u64 translate_phy_pte(VCPU *v, u64 *pte,
 //    ps = rr.ps;
     maddr = ((maddr & _PAGE_PPN_MASK) & PAGE_MASK) | (paddr & ~PAGE_MASK);
     phy_pte.ppn = maddr >> ARCH_PAGE_SHIFT;
+
+    /* If shadow mode is enabled, virtualize dirty bit.  */
+    if (shadow_mode_enabled(d) && phy_pte.d) {
+        u64 gpfn = paddr >> PAGE_SHIFT;
+        phy_pte.val |= _PAGE_VIRT_D;
+
+        /* If the page is not already dirty, don't set the dirty bit! */
+        if (gpfn < d->arch.shadow_bitmap_size * 8
+            && !test_bit(gpfn, d->arch.shadow_bitmap))
+            phy_pte.d = 0;
+    }
+
     return phy_pte.val;
 }
 
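This last hunk is where the dirty bit actually gets virtualized: whenever the
guest PTE has D set, translate_phy_pte() records that fact in the software
_PAGE_VIRT_D bit and strips the machine D bit unless the page is already
marked in the shadow bitmap, so the first store to a not-yet-logged page
raises the dirty-bit fault handled earlier. A self-contained model of that
logic (bit positions are stand-ins):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PTE_D      (UINT64_C(1) << 6)   /* stand-in hardware dirty bit */
    #define PTE_VIRT_D (UINT64_C(1) << 62)  /* stand-in software "guest D" bit */

    static uint64_t virtualize_d_bit(uint64_t pte, bool already_logged)
    {
        if (!(pte & PTE_D))
            return pte;            /* guest marks the page clean: leave it */
        pte |= PTE_VIRT_D;         /* remember the guest permitted writes */
        if (!already_logged)
            pte &= ~PTE_D;         /* clean machine D: first store faults */
        return pte;
    }

    int main(void)
    {
        /* Not yet logged: D is stripped, so the first write traps. */
        printf("%#llx\n", (unsigned long long)virtualize_d_bit(PTE_D, false));
        /* Already logged: D stays set and later writes run at full speed. */
        printf("%#llx\n", (unsigned long long)virtualize_d_bit(PTE_D, true));
        return 0;
    }
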
