
[Xen-devel] [PATCH RFCv2 0/1] Introduce VCPUOP_reset_vcpu_info



The patch and guest code are based on the prototype by Konrad Rzeszutek Wilk.

VCPUOP_reset_vcpu_info is required to support kexec performed by an SMP PVHVM
guest: VCPUOP_register_vcpu_info can only be called once per vCPU (subsequent
calls fail with -EINVAL), so without a reset operation the kexec'ed kernel
cannot re-register its vcpu_info areas. The new hypercall was tested with the
following guest code (a condensed sketch of the call sequence follows the
diff):

diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 4fd979e..6e8021c 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -183,8 +183,6 @@ static void xen_vcpu_setup(int cpu)
         * This path is called twice on PVHVM - first during bootup via
         * smp_init -> xen_hvm_cpu_notify, and then if the VCPU is being
         * hotplugged: cpu_up -> xen_hvm_cpu_notify.
-        * As we can only do the VCPUOP_register_vcpu_info once lets
-        * not over-write its result.
         *
         * For PV it is called during restore (xen_vcpu_restore) and bootup
         * (xen_setup_vcpu_info_placement). The hotplug mechanism does not
@@ -210,10 +208,6 @@ static void xen_vcpu_setup(int cpu)
        /* Check to see if the hypervisor will put the vcpu_info
           structure where we want it, which allows direct access via
           a percpu-variable.
-          N.B. This hypercall can _only_ be called once per CPU. Subsequent
-          calls will error out with -EINVAL. This is due to the fact that
-          hypervisor has no unregister variant and this hypercall does not
-          allow to over-write info.mfn and info.offset.
         */
        err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info);
 
@@ -228,6 +222,22 @@ static void xen_vcpu_setup(int cpu)
        }
 }
 
+void xen_teardown_vcpu_setup(int cpu)
+{
+       int err;
+
+       if (!have_vcpu_info_placement)
+               return;
+
+       err = HYPERVISOR_vcpu_op(VCPUOP_reset_vcpu_info, cpu, NULL);
+       if (err) {
+               xen_raw_printk("%s: VCPUOP_reset_vcpu_info rc: %d\n", __func__, err);
+               return;
+       }
+       if (cpu < MAX_VIRT_CPUS)
+               per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];
+}
+
 /*
  * On restore, set the vcpu placement up again.
  * If it fails, then we're in a bad state, since
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index bc5e897..7c39a38 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -769,10 +769,29 @@ static void xen_hvm_cpu_die(unsigned int cpu)
 #ifdef CONFIG_KEXEC
 void xen_kexec_shutdown(void)
 {
+       int cpu;
+       cpumask_var_t cpu_offline_mask;
+
        if (!kexec_in_progress)
                return;
 
+       gnttab_suspend();
+
+       /* Stop all CPUs except for the first one */
+       disable_nonboot_cpus();
+
        xen_hvm_reset_eventchannels();
+
+       if (!alloc_cpumask_var(&cpu_offline_mask, GFP_KERNEL))
+               return;
+
+       cpumask_andnot(cpu_offline_mask, cpu_present_mask,
+                      cpu_online_mask);
+
+       for_each_cpu(cpu, cpu_offline_mask)
+               xen_teardown_vcpu_setup(cpu);
+
+       free_cpumask_var(cpu_offline_mask);
 }
 #endif
 
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index d083e82..36dd380 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -53,6 +53,7 @@ void xen_init_irq_ops(void);
 void xen_setup_timer(int cpu);
 void xen_setup_runstate_info(int cpu);
 void xen_teardown_timer(int cpu);
+void xen_teardown_vcpu_setup(int cpu);
 cycle_t xen_clocksource_read(void);
 void xen_setup_cpu_clockevents(void);
 void __init xen_init_time_ops(void);
diff --git a/include/xen/interface/vcpu.h b/include/xen/interface/vcpu.h
index b05288c..a0a374c 100644
--- a/include/xen/interface/vcpu.h
+++ b/include/xen/interface/vcpu.h
@@ -172,4 +172,7 @@ DEFINE_GUEST_HANDLE_STRUCT(vcpu_register_vcpu_info);
 
 /* Send an NMI to the specified VCPU. @extra_arg == NULL. */
 #define VCPUOP_send_nmi             11
+
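+/* Undo VCPUOP_register_vcpu_info. @extra_arg == NULL. */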
+#define VCPUOP_reset_vcpu_info      14
 #endif /* __XEN_PUBLIC_VCPU_H__ */
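
For illustration, the guest-side sequence that the diff above implements can be
condensed as follows. This is a minimal sketch, not part of the patch: "cpu" is
the vCPU id and "info" is the struct vcpu_register_vcpu_info prepared in
xen_vcpu_setup(); error handling and the xen_vcpu per-cpu bookkeeping are
omitted.

       /* Old kernel, just before kexec: undo the vcpu_info registration so
        * the hypervisor falls back to the vcpu_info slots in shared_info
        * (valid for cpu < MAX_VIRT_CPUS). */
       HYPERVISOR_vcpu_op(VCPUOP_reset_vcpu_info, cpu, NULL);

       /* New (kexec'ed) kernel, during xen_vcpu_setup(): registration now
        * succeeds instead of failing with -EINVAL. */
       HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info);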

Vitaly Kuznetsov (1):
  Introduce VCPUOP_reset_vcpu_info

 xen/arch/x86/hvm/hvm.c    |  1 +
 xen/common/domain.c       | 61 +++++++++++++++++++++++++++++++++++++++++++++++
 xen/include/public/vcpu.h | 14 +++++++++++
 3 files changed, 76 insertions(+)

-- 
1.9.3




 

