
[Xen-changelog] [xen stable-4.10] xen/pvshim: add migration support



commit 83c838c9f853712ac5d36c9dc001eb8903b1e1e2
Author:     Roger Pau Monne <roger.pau@xxxxxxxxxx>
AuthorDate: Thu Jan 11 11:41:19 2018 +0000
Commit:     Roger Pau Monne <roger.pau@xxxxxxxxxx>
CommitDate: Fri Jan 12 15:47:32 2018 +0000

    xen/pvshim: add migration support
    
    Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
    ---
    Changes since v1:
     - Use bitmap_zero instead of memset.
     - Don't drop the __init attribute of unshare_xen_page_with_guest,
       it's not needed for migration.
     - Remove BUG_ON to check correct mapping, map_domain_page cannot
       fail.
     - Reduce indentation level of pv_shim_shutdown.
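
For context (not part of this patch): a PV guest reaches the new suspend path
by issuing SCHEDOP_shutdown with reason SHUTDOWN_suspend and passing the
machine frame of its start_info page in the third hypercall register, which is
why write_start_info() below reads the frame from regs->rdx (regs->edx for
32-bit guests). A minimal guest-kernel-side sketch, assuming a three-argument
hypercall stub along the lines of Linux's _hypercall3()/HYPERVISOR_suspend();
guest_request_suspend() is a hypothetical wrapper name, not something defined
by this patch:

    #include <xen/interface/sched.h>  /* Linux guest header: SCHEDOP_shutdown,
                                         SHUTDOWN_suspend, struct sched_shutdown */

    static int guest_request_suspend(unsigned long start_info_mfn)
    {
        struct sched_shutdown r = { .reason = SHUTDOWN_suspend };

        /*
         * The start_info MFN travels in the third hypercall register
         * (rdx/edx); a non-zero return means the suspend was cancelled.
         */
        return _hypercall3(int, sched_op, SCHEDOP_shutdown, &r,
                           start_info_mfn);
    }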
---
 xen/arch/x86/guest/xen.c          |  29 +++++++
 xen/arch/x86/pv/shim.c            | 155 +++++++++++++++++++++++++++++++++++++-
 xen/common/domain.c               |  11 ++-
 xen/common/schedule.c             |   3 +-
 xen/drivers/char/xen_pv_console.c |   2 +-
 xen/include/asm-x86/guest/xen.h   |   5 ++
 xen/include/asm-x86/pv/shim.h     |   5 +-
 xen/include/xen/sched.h           |   2 +-
 8 files changed, 197 insertions(+), 15 deletions(-)

diff --git a/xen/arch/x86/guest/xen.c b/xen/arch/x86/guest/xen.c
index 57b297ad47..2a5554ab26 100644
--- a/xen/arch/x86/guest/xen.c
+++ b/xen/arch/x86/guest/xen.c
@@ -348,6 +348,35 @@ uint32_t hypervisor_cpuid_base(void)
     return xen_cpuid_base;
 }
 
+static void ap_resume(void *unused)
+{
+    map_vcpuinfo();
+    init_evtchn();
+}
+
+void hypervisor_resume(void)
+{
+    /* Reset shared info page. */
+    map_shared_info();
+
+    /*
+     * Reset vcpu_info. Just clean the mapped bitmap and try to map the vcpu
+     * area again. On failure to map (when it was previously mapped) panic
+     * since it's impossible to safely shut down running guest vCPUs in order
+     * to meet the new XEN_LEGACY_MAX_VCPUS requirement.
+     */
+    bitmap_zero(vcpu_info_mapped, NR_CPUS);
+    if ( map_vcpuinfo() && nr_cpu_ids > XEN_LEGACY_MAX_VCPUS )
+        panic("unable to remap vCPU info and vCPUs > legacy limit");
+
+    /* Setup event channel upcall vector. */
+    init_evtchn();
+    smp_call_function(ap_resume, NULL, 1);
+
+    if ( pv_console )
+        pv_console_init();
+}
+
 /*
  * Local variables:
  * mode: C
diff --git a/xen/arch/x86/pv/shim.c b/xen/arch/x86/pv/shim.c
index 986f9da58a..c53a4ca407 100644
--- a/xen/arch/x86/pv/shim.c
+++ b/xen/arch/x86/pv/shim.c
@@ -160,10 +160,159 @@ void __init pv_shim_setup_dom(struct domain *d, l4_pgentry_t *l4start,
     guest = d;
 }
 
-void pv_shim_shutdown(uint8_t reason)
+static void write_start_info(struct domain *d)
 {
-    /* XXX: handle suspend */
-    xen_hypercall_shutdown(reason);
+    struct cpu_user_regs *regs = guest_cpu_user_regs();
+    start_info_t *si = map_domain_page(_mfn(is_pv_32bit_domain(d) ? regs->edx
+                                                                  : regs->rdx));
+    uint64_t param;
+
+    snprintf(si->magic, sizeof(si->magic), "xen-3.0-x86_%s",
+             is_pv_32bit_domain(d) ? "32p" : "64");
+    si->nr_pages = d->tot_pages;
+    si->shared_info = virt_to_maddr(d->shared_info);
+    si->flags = 0;
+    BUG_ON(xen_hypercall_hvm_get_param(HVM_PARAM_STORE_PFN, &si->store_mfn));
+    BUG_ON(xen_hypercall_hvm_get_param(HVM_PARAM_STORE_EVTCHN, &param));
+    si->store_evtchn = param;
+    BUG_ON(xen_hypercall_hvm_get_param(HVM_PARAM_CONSOLE_EVTCHN, &param));
+    si->console.domU.evtchn = param;
+    if ( pv_console )
+        si->console.domU.mfn = virt_to_mfn(consoled_get_ring_addr());
+    else if ( xen_hypercall_hvm_get_param(HVM_PARAM_CONSOLE_PFN,
+                                          &si->console.domU.mfn) )
+        BUG();
+
+    if ( is_pv_32bit_domain(d) )
+        xlat_start_info(si, XLAT_start_info_console_domU);
+
+    unmap_domain_page(si);
+}
+
+int pv_shim_shutdown(uint8_t reason)
+{
+    struct domain *d = current->domain;
+    struct vcpu *v;
+    unsigned int i;
+    uint64_t old_store_pfn, old_console_pfn = 0, store_pfn, console_pfn;
+    uint64_t store_evtchn, console_evtchn;
+    long rc;
+
+    if ( reason != SHUTDOWN_suspend )
+        /* Forward to L0. */
+        return xen_hypercall_shutdown(reason);
+
+    BUG_ON(current->vcpu_id != 0);
+
+    BUG_ON(xen_hypercall_hvm_get_param(HVM_PARAM_STORE_PFN, &old_store_pfn));
+    if ( !pv_console )
+        BUG_ON(xen_hypercall_hvm_get_param(HVM_PARAM_CONSOLE_PFN,
+                                           &old_console_pfn));
+
+    /* Pause the other vcpus before starting the migration. */
+    for_each_vcpu(d, v)
+        if ( v != current )
+            vcpu_pause_by_systemcontroller(v);
+
+    rc = xen_hypercall_shutdown(SHUTDOWN_suspend);
+    if ( rc )
+    {
+        for_each_vcpu(d, v)
+            if ( v != current )
+                vcpu_unpause_by_systemcontroller(v);
+
+        return rc;
+    }
+
+    /* Resume the shim itself first. */
+    hypervisor_resume();
+
+    /*
+     * ATM there's nothing Xen can do if the console/store pfn changes,
+     * because Xen won't have a page_info struct for it.
+     */
+    BUG_ON(xen_hypercall_hvm_get_param(HVM_PARAM_STORE_PFN, &store_pfn));
+    BUG_ON(old_store_pfn != store_pfn);
+    if ( !pv_console )
+    {
+        BUG_ON(xen_hypercall_hvm_get_param(HVM_PARAM_CONSOLE_PFN,
+                                           &console_pfn));
+        BUG_ON(old_console_pfn != console_pfn);
+    }
+
+    /* Update domain id. */
+    d->domain_id = get_initial_domain_id();
+
+    /* Clean the iomem range. */
+    BUG_ON(iomem_deny_access(d, 0, ~0UL));
+
+    /* Clean grant frames. */
+    xfree(grant_frames);
+    grant_frames = NULL;
+    nr_grant_list = 0;
+
+    /* Clean event channels. */
+    for ( i = 0; i < EVTCHN_2L_NR_CHANNELS; i++ )
+    {
+        if ( !port_is_valid(d, i) )
+            continue;
+
+        if ( evtchn_handled(d, i) )
+            evtchn_close(d, i, false);
+        else
+            evtchn_free(d, evtchn_from_port(d, i));
+    }
+
+    /* Reserve store/console event channel. */
+    BUG_ON(xen_hypercall_hvm_get_param(HVM_PARAM_STORE_EVTCHN, &store_evtchn));
+    BUG_ON(evtchn_allocate_port(d, store_evtchn));
+    evtchn_reserve(d, store_evtchn);
+    BUG_ON(xen_hypercall_hvm_get_param(HVM_PARAM_CONSOLE_EVTCHN,
+                                       &console_evtchn));
+    BUG_ON(evtchn_allocate_port(d, console_evtchn));
+    evtchn_reserve(d, console_evtchn);
+
+    /* Clean watchdogs. */
+    watchdog_domain_destroy(d);
+    watchdog_domain_init(d);
+
+    /* Clean the PIRQ EOI page. */
+    if ( d->arch.pirq_eoi_map != NULL )
+    {
+        unmap_domain_page_global(d->arch.pirq_eoi_map);
+        put_page_and_type(mfn_to_page(d->arch.pirq_eoi_map_mfn));
+        d->arch.pirq_eoi_map = NULL;
+        d->arch.pirq_eoi_map_mfn = 0;
+        d->arch.auto_unmask = 0;
+    }
+
+    /*
+     * NB: there's no need to fixup the p2m, since the mfns assigned
+     * to the PV guest have not changed at all. Just re-write the
+     * start_info fields with the appropriate value.
+     */
+    write_start_info(d);
+
+    for_each_vcpu(d, v)
+    {
+        /* Unmap guest vcpu_info pages. */
+        unmap_vcpu_info(v);
+
+        /* Reset the periodic timer to the default value. */
+        v->periodic_period = MILLISECS(10);
+        /* Stop the singleshot timer. */
+        stop_timer(&v->singleshot_timer);
+
+        if ( test_bit(_VPF_down, &v->pause_flags) )
+            BUG_ON(vcpu_reset(v));
+
+        if ( v != current )
+            vcpu_unpause_by_systemcontroller(v);
+        else
+            vcpu_force_reschedule(v);
+    }
+
+    return 0;
 }
 
 static long pv_shim_event_channel_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
diff --git a/xen/common/domain.c b/xen/common/domain.c
index 1ba05fa3a1..9a703734eb 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -689,16 +689,13 @@ void __domain_crash_synchronous(void)
 }
 
 
-void domain_shutdown(struct domain *d, u8 reason)
+int domain_shutdown(struct domain *d, u8 reason)
 {
     struct vcpu *v;
 
 #ifdef CONFIG_X86
     if ( pv_shim )
-    {
-        pv_shim_shutdown(reason);
-        return;
-    }
+        return pv_shim_shutdown(reason);
 #endif
 
     spin_lock(&d->shutdown_lock);
@@ -713,7 +710,7 @@ void domain_shutdown(struct domain *d, u8 reason)
     if ( d->is_shutting_down )
     {
         spin_unlock(&d->shutdown_lock);
-        return;
+        return 0;
     }
 
     d->is_shutting_down = 1;
@@ -735,6 +732,8 @@ void domain_shutdown(struct domain *d, u8 reason)
     __domain_finalise_shutdown(d);
 
     spin_unlock(&d->shutdown_lock);
+
+    return 0;
 }
 
 void domain_resume(struct domain *d)
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index 88279213e8..b7884263f2 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -1149,11 +1149,10 @@ ret_t do_sched_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
         if ( copy_from_guest(&sched_shutdown, arg, 1) )
             break;
 
-        ret = 0;
         TRACE_3D(TRC_SCHED_SHUTDOWN,
                  current->domain->domain_id, current->vcpu_id,
                  sched_shutdown.reason);
-        domain_shutdown(current->domain, (u8)sched_shutdown.reason);
+        ret = domain_shutdown(current->domain, (u8)sched_shutdown.reason);
 
         break;
     }
diff --git a/xen/drivers/char/xen_pv_console.c b/xen/drivers/char/xen_pv_console.c
index 948343303e..cc1c1d743f 100644
--- a/xen/drivers/char/xen_pv_console.c
+++ b/xen/drivers/char/xen_pv_console.c
@@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(tx_lock);
 
 bool pv_console;
 
-void __init pv_console_init(void)
+void pv_console_init(void)
 {
     long r;
     uint64_t raw_pfn = 0, raw_evtchn = 0;
diff --git a/xen/include/asm-x86/guest/xen.h b/xen/include/asm-x86/guest/xen.h
index ac48dcbe44..11243fe60d 100644
--- a/xen/include/asm-x86/guest/xen.h
+++ b/xen/include/asm-x86/guest/xen.h
@@ -39,6 +39,7 @@ int hypervisor_free_unused_page(mfn_t mfn);
 void hypervisor_fixup_e820(struct e820map *e820);
 const unsigned long *hypervisor_reserved_pages(unsigned int *size);
 uint32_t hypervisor_cpuid_base(void);
+void hypervisor_resume(void);
 
 DECLARE_PER_CPU(unsigned int, vcpu_id);
 DECLARE_PER_CPU(struct vcpu_info *, vcpu_info);
@@ -72,6 +73,10 @@ static inline uint32_t hypervisor_cpuid_base(void)
     ASSERT_UNREACHABLE();
     return 0;
 };
+static inline void hypervisor_resume(void)
+{
+    ASSERT_UNREACHABLE();
+};
 
 #endif /* CONFIG_XEN_GUEST */
 #endif /* __X86_GUEST_XEN_H__ */
diff --git a/xen/include/asm-x86/pv/shim.h b/xen/include/asm-x86/pv/shim.h
index ab656fd854..4d5f0b43fc 100644
--- a/xen/include/asm-x86/pv/shim.h
+++ b/xen/include/asm-x86/pv/shim.h
@@ -35,7 +35,7 @@ void pv_shim_setup_dom(struct domain *d, l4_pgentry_t *l4start,
                        unsigned long va_start, unsigned long store_va,
                        unsigned long console_va, unsigned long vphysmap,
                        start_info_t *si);
-void pv_shim_shutdown(uint8_t reason);
+int pv_shim_shutdown(uint8_t reason);
 void pv_shim_inject_evtchn(unsigned int port);
 domid_t get_initial_domain_id(void);
 
@@ -50,9 +50,10 @@ static inline void pv_shim_setup_dom(struct domain *d, l4_pgentry_t *l4start,
 {
     ASSERT_UNREACHABLE();
 }
-static inline void pv_shim_shutdown(uint8_t reason)
+static inline int pv_shim_shutdown(uint8_t reason)
 {
     ASSERT_UNREACHABLE();
+    return 0;
 }
 static inline void pv_shim_inject_evtchn(unsigned int port)
 {
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index 64abc1df6c..2541ecb04f 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -603,7 +603,7 @@ static inline struct domain *rcu_lock_current_domain(void)
 struct domain *get_domain_by_id(domid_t dom);
 void domain_destroy(struct domain *d);
 int domain_kill(struct domain *d);
-void domain_shutdown(struct domain *d, u8 reason);
+int domain_shutdown(struct domain *d, u8 reason);
 void domain_resume(struct domain *d);
 void domain_pause_for_debugger(void);
 
--
generated by git-patchbot for /home/xen/git/xen.git#stable-4.10
