[xen master] tools/libs: move xc_resume.c to libxenguest
commit bf1fc18901dfea05a69f661493b934c0db7d3503
Author: Juergen Gross <jgross@xxxxxxxx>
AuthorDate: Fri Jun 4 08:02:12 2021 +0200
Commit: Julien Grall <jgrall@xxxxxxxxxx>
CommitDate: Fri Jun 4 18:56:46 2021 +0100
tools/libs: move xc_resume.c to libxenguest
The guest suspend functionality is already part of libxenguest. Move
the resume functionality from libxenctrl to libxenguest, too.
Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
Acked-by: Wei Liu <wl@xxxxxxx>
---
tools/include/xenctrl.h | 63 ----------
tools/include/xenguest.h | 62 ++++++++++
tools/libs/ctrl/Makefile | 1 -
tools/libs/ctrl/xc_resume.c | 264 ------------------------------------------
tools/libs/guest/Makefile | 1 +
tools/libs/guest/xg_resume.c | 265 +++++++++++++++++++++++++++++++++++++++++++
6 files changed, 328 insertions(+), 328 deletions(-)
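As a usage sketch (a hypothetical caller, not part of the patch): after this
move, code calling xc_domain_resume() must include xenguest.h and link against
libxenguest, since xenctrl.h no longer declares it. A minimal example, using
only public libxc/libxenguest API:

#include <xenctrl.h>
#include <xenguest.h>

/* Hypothetical caller: resume a previously suspended domain via the
 * always-safe path (fast = 0). */
static int try_resume(uint32_t domid)
{
    xc_interface *xch = xc_interface_open(NULL, NULL, 0);
    int rc;

    if ( !xch )
        return -1;

    rc = xc_domain_resume(xch, domid, 0);

    xc_interface_close(xch);
    return rc;
}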
diff --git a/tools/include/xenctrl.h b/tools/include/xenctrl.h
index 58d3377d6a..2a7c836a02 100644
--- a/tools/include/xenctrl.h
+++ b/tools/include/xenctrl.h
@@ -576,69 +576,6 @@ int xc_domain_destroy(xc_interface *xch,
uint32_t domid);
-/**
- * This function resumes a suspended domain. The domain should have
- * been previously suspended.
- *
- * Note that there is no matching 'xc_domain_suspend', as suspending a
- * domain is quite the endeavour.
- *
- * For the purpose of this explanation there are three guests:
- * PV (using hypercalls for privileged operations), HVM
- * (fully hardware virtualized guests using emulated devices for everything),
- * and PVHVM (PV aware with hardware virtualisation).
- *
- * HVM guests are the simplest - they suspend via S3 / S4 and resume from
- * S3 / S4. Upon resume they have to re-negotiate with the emulated devices.
- *
- * PV and PVHVM communicate via hypercalls for suspend (and resume).
- * For suspend, the toolstack initiates the process by writing the
- * string "suspend" to the XenBus node "control/shutdown".
- *
- * The PV guest stashes anything it deems necessary in 'struct
- * start_info' in case of failure (PVHVM may ignore this) and calls
- * the SCHEDOP_shutdown::SHUTDOWN_suspend hypercall (for PV as
- * argument it passes the MFN to 'struct start_info').
- *
- * And then the guest is suspended.
- *
- * Checkpointing, or notifying the guest that the suspend failed or was
- * cancelled (in the checkpoint case), is done by having the
- * SCHEDOP_shutdown::SHUTDOWN_suspend hypercall return a non-zero
- * value.
- *
- * The PV and PVHVM resume paths are similar. For PV it would be
- * similar to bootup - figure out where the 'struct start_info' is (or
- * if the suspend was cancelled aka checkpointed - reuse the saved
- * values).
- *
- * From here on they differ in specifics depending on whether the guest
- * is PV or PVHVM, but overall follow the same path:
- * - PV: Bring up the vCPUs,
- * - PVHVM: Set up the vector callback,
- * - Bring up vCPU runstates,
- * - Remap the grant tables if checkpointing, or set them up from scratch,
- *
- *
- * If the resume was not from a checkpoint (or if the suspend was
- * successful) we would set up the PV timers and the different PV events.
- * Lastly the PV drivers re-negotiate with the backend.
- *
- * This function returns before the guest has started resuming. That is,
- * the guest is in a non-running state and its vCPU context is in the
- * SCHEDOP_shutdown::SHUTDOWN_suspend hypercall return path (for PV and
- * PVHVM). For HVM it is in the QEMU-emulated BIOS handling S3
- * suspend.
- *
- * @parm xch a handle to an open hypervisor interface
- * @parm domid the domain id to resume
- * @parm fast use cooperative resume (guest must support this)
- * return 0 on success, -1 on failure
- */
-int xc_domain_resume(xc_interface *xch,
- uint32_t domid,
- int fast);
-
/**
* This function will shutdown a domain. This is intended for use in
* fully-virtualized domains where this operation is analogous to the
diff --git a/tools/include/xenguest.h b/tools/include/xenguest.h
index f9fb0449ad..61d0a82f48 100644
--- a/tools/include/xenguest.h
+++ b/tools/include/xenguest.h
@@ -689,6 +689,68 @@ int xc_query_page_offline_status(xc_interface *xch,
unsigned long start,
int xc_exchange_page(xc_interface *xch, uint32_t domid, xen_pfn_t mfn);
+/**
+ * This function resumes a suspended domain. The domain should have
+ * been previously suspended.
+ *
+ * Note that there is no matching 'xc_domain_suspend', as suspending a
+ * domain is quite the endeavour.
+ *
+ * For the purpose of this explanation there are three guests:
+ * PV (using hypercalls for privileged operations), HVM
+ * (fully hardware virtualized guests using emulated devices for everything),
+ * and PVHVM (PV aware with hardware virtualisation).
+ *
+ * HVM guests are the simplest - they suspend via S3 / S4 and resume from
+ * S3 / S4. Upon resume they have to re-negotiate with the emulated devices.
+ *
+ * PV and PVHVM communicate via hypercalls for suspend (and resume).
+ * For suspend, the toolstack initiates the process by writing the
+ * string "suspend" to the XenBus node "control/shutdown".
+ *
+ * The PV guest stashes anything it deems necessary in 'struct
+ * start_info' in case of failure (PVHVM may ignore this) and calls
+ * the SCHEDOP_shutdown::SHUTDOWN_suspend hypercall (for PV as
+ * argument it passes the MFN to 'struct start_info').
+ *
+ * And then the guest is suspended.
+ *
+ * Checkpointing, or notifying the guest that the suspend failed or was
+ * cancelled (in the checkpoint case), is done by having the
+ * SCHEDOP_shutdown::SHUTDOWN_suspend hypercall return a non-zero
+ * value.
+ *
+ * The PV and PVHVM resume paths are similar. For PV it would be
+ * similar to bootup - figure out where the 'struct start_info' is (or
+ * if the suspend was cancelled aka checkpointed - reuse the saved
+ * values).
+ *
+ * From here on they differ in specifics depending on whether the guest
+ * is PV or PVHVM, but overall follow the same path:
+ * - PV: Bring up the vCPUs,
+ * - PVHVM: Set up the vector callback,
+ * - Bring up vCPU runstates,
+ * - Remap the grant tables if checkpointing, or set them up from scratch,
+ *
+ *
+ * If the resume was not from a checkpoint (or if the suspend was
+ * successful) we would set up the PV timers and the different PV events.
+ * Lastly the PV drivers re-negotiate with the backend.
+ *
+ * This function returns before the guest has started resuming. That is,
+ * the guest is in a non-running state and its vCPU context is in the
+ * SCHEDOP_shutdown::SHUTDOWN_suspend hypercall return path (for PV and
+ * PVHVM). For HVM it is in the QEMU-emulated BIOS handling S3
+ * suspend.
+ *
+ * @parm xch a handle to an open hypervisor interface
+ * @parm domid the domain id to resume
+ * @parm fast use cooperative resume (guest must support this)
+ * return 0 on success, -1 on failure
+ */
+int xc_domain_resume(xc_interface *xch,
+ uint32_t domid,
+ int fast);
/**
* Memory related information, such as PFN types, the P2M table,
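The comment above describes the toolstack initiating suspend by writing
"suspend" to the guest's XenBus "control/shutdown" node. A hedged sketch of
that write using libxenstore follows; the helper name and the
"/local/domain/<domid>/control/shutdown" path layout are assumptions, and a
real toolstack would also wait for the guest to acknowledge the request:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <xenstore.h>

/* Hypothetical helper: ask a guest to suspend by writing "suspend" to
 * its control/shutdown node. */
static int request_suspend(unsigned int domid)
{
    struct xs_handle *xsh = xs_open(0);
    char path[64];
    bool ok;

    if ( !xsh )
        return -1;

    snprintf(path, sizeof(path), "/local/domain/%u/control/shutdown", domid);
    ok = xs_write(xsh, XBT_NULL, path, "suspend", strlen("suspend"));

    xs_close(xsh);
    return ok ? 0 : -1;
}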
diff --git a/tools/libs/ctrl/Makefile b/tools/libs/ctrl/Makefile
index ce9ecae710..fbeb3a3537 100644
--- a/tools/libs/ctrl/Makefile
+++ b/tools/libs/ctrl/Makefile
@@ -20,7 +20,6 @@ SRCS-y += xc_rt.c
SRCS-y += xc_tbuf.c
SRCS-y += xc_pm.c
SRCS-y += xc_cpu_hotplug.c
-SRCS-y += xc_resume.c
SRCS-y += xc_vm_event.c
SRCS-y += xc_vmtrace.c
SRCS-y += xc_monitor.c
diff --git a/tools/libs/ctrl/xc_resume.c b/tools/libs/ctrl/xc_resume.c
deleted file mode 100644
index e3c8e83aa9..0000000000
--- a/tools/libs/ctrl/xc_resume.c
+++ /dev/null
@@ -1,264 +0,0 @@
-/*
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation;
- * version 2.1 of the License.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include "xc_private.h"
-
-#if defined(__i386__) || defined(__x86_64__)
-
-#include <xen/foreign/x86_32.h>
-#include <xen/foreign/x86_64.h>
-#include <xen/hvm/params.h>
-#include "xc_core.h"
-
-static int modify_returncode(xc_interface *xch, uint32_t domid)
-{
- vcpu_guest_context_any_t ctxt;
- xc_dominfo_t info;
- xen_capabilities_info_t caps;
- struct domain_info_context _dinfo = {};
- struct domain_info_context *dinfo = &_dinfo;
- int rc;
-
- if ( xc_domain_getinfo(xch, domid, 1, &info) != 1 ||
- info.domid != domid )
- {
- PERROR("Could not get domain info");
- return -1;
- }
-
- if ( !info.shutdown || (info.shutdown_reason != SHUTDOWN_suspend) )
- {
- ERROR("Dom %d not suspended: (shutdown %d, reason %d)", domid,
- info.shutdown, info.shutdown_reason);
- errno = EINVAL;
- return -1;
- }
-
- if ( info.hvm )
- {
- /* HVM guests without PV drivers have no return code to modify. */
- uint64_t irq = 0;
- xc_hvm_param_get(xch, domid, HVM_PARAM_CALLBACK_IRQ, &irq);
- if ( !irq )
- return 0;
-
- /* HVM guests have host address width. */
- if ( xc_version(xch, XENVER_capabilities, &caps) != 0 )
- {
- PERROR("Could not get Xen capabilities");
- return -1;
- }
- dinfo->guest_width = strstr(caps, "x86_64") ? 8 : 4;
- }
- else
- {
- /* Probe PV guest address width. */
- if ( xc_domain_get_guest_width(xch, domid, &dinfo->guest_width) )
- return -1;
- }
-
- if ( (rc = xc_vcpu_getcontext(xch, domid, 0, &ctxt)) != 0 )
- return rc;
-
- SET_FIELD(&ctxt, user_regs.eax, 1, dinfo->guest_width);
-
- if ( (rc = xc_vcpu_setcontext(xch, domid, 0, &ctxt)) != 0 )
- return rc;
-
- return 0;
-}
-
-#else
-
-static int modify_returncode(xc_interface *xch, uint32_t domid)
-{
- return 0;
-
-}
-
-#endif
-
-static int xc_domain_resume_cooperative(xc_interface *xch, uint32_t domid)
-{
- DECLARE_DOMCTL;
- int rc;
-
- /*
- * Set hypercall return code to indicate that suspend is cancelled
- * (rather than resuming in a new domain context).
- */
- if ( (rc = modify_returncode(xch, domid)) != 0 )
- return rc;
-
- domctl.cmd = XEN_DOMCTL_resumedomain;
- domctl.domain = domid;
- return do_domctl(xch, &domctl);
-}
-
-#if defined(__i386__) || defined(__x86_64__)
-static int xc_domain_resume_hvm(xc_interface *xch, uint32_t domid)
-{
- DECLARE_DOMCTL;
-
- /*
- * The domctl XEN_DOMCTL_resumedomain unpauses each vCPU. After
- * the domctl, the guest will run.
- *
- * If it is PVHVM, the guest called the hypercall
- * SCHEDOP_shutdown:SHUTDOWN_suspend
- * to suspend itself. We don't modify the return code, so the PV driver
- * will disconnect and reconnect.
- *
- * If it is plain HVM, the guest will simply continue running.
- */
- domctl.cmd = XEN_DOMCTL_resumedomain;
- domctl.domain = domid;
- return do_domctl(xch, &domctl);
-}
-#endif
-
-static int xc_domain_resume_any(xc_interface *xch, uint32_t domid)
-{
- DECLARE_DOMCTL;
- xc_dominfo_t info;
- int i, rc = -1;
-#if defined(__i386__) || defined(__x86_64__)
- struct domain_info_context _dinfo = { .guest_width = 0,
- .p2m_size = 0 };
- struct domain_info_context *dinfo = &_dinfo;
- xen_pfn_t mfn, store_mfn, console_mfn;
- vcpu_guest_context_any_t ctxt;
- start_info_any_t *start_info;
- shared_info_any_t *shinfo = NULL;
- xen_pfn_t *p2m = NULL;
-#endif
-
- if ( xc_domain_getinfo(xch, domid, 1, &info) != 1 )
- {
- PERROR("Could not get domain info");
- return rc;
- }
-
- /*
- * (x86 only) Rewrite store_mfn and console_mfn back to MFN (from PFN).
- */
-#if defined(__i386__) || defined(__x86_64__)
- if ( info.hvm )
- return xc_domain_resume_hvm(xch, domid);
-
- if ( xc_domain_get_guest_width(xch, domid, &dinfo->guest_width) != 0 )
- {
- PERROR("Could not get domain width");
- return rc;
- }
-
- /* Map the shared info frame */
- shinfo = xc_map_foreign_range(xch, domid, PAGE_SIZE,
- PROT_READ, info.shared_info_frame);
- if ( shinfo == NULL )
- {
- ERROR("Couldn't map shared info");
- goto out;
- }
-
- /* Map the p2m list */
- if ( xc_core_arch_map_p2m(xch, dinfo, &info, shinfo, &p2m) )
- {
- ERROR("Couldn't map p2m table");
- goto out;
- }
-
- if ( xc_vcpu_getcontext(xch, domid, 0, &ctxt) )
- {
- ERROR("Could not get vcpu context");
- goto out;
- }
-
- mfn = GET_FIELD(&ctxt, user_regs.edx, dinfo->guest_width);
-
- start_info = xc_map_foreign_range(xch, domid, PAGE_SIZE,
- PROT_READ | PROT_WRITE, mfn);
- if ( start_info == NULL )
- {
- ERROR("Couldn't map start_info");
- goto out;
- }
-
- store_mfn = GET_FIELD(start_info, store_mfn, dinfo->guest_width);
- console_mfn = GET_FIELD(start_info, console.domU.mfn, dinfo->guest_width);
- if ( dinfo->guest_width == 4 )
- {
- store_mfn = ((uint32_t *)p2m)[store_mfn];
- console_mfn = ((uint32_t *)p2m)[console_mfn];
- }
- else
- {
- store_mfn = ((uint64_t *)p2m)[store_mfn];
- console_mfn = ((uint64_t *)p2m)[console_mfn];
- }
- SET_FIELD(start_info, store_mfn, store_mfn, dinfo->guest_width);
- SET_FIELD(start_info, console.domU.mfn, console_mfn, dinfo->guest_width);
-
- munmap(start_info, PAGE_SIZE);
-#endif /* defined(__i386__) || defined(__x86_64__) */
-
- /* Reset all secondary CPU states. */
- for ( i = 1; i <= info.max_vcpu_id; i++ )
- if ( xc_vcpu_setcontext(xch, domid, i, NULL) != 0 )
- {
- ERROR("Couldn't reset vcpu state");
- goto out;
- }
-
- /* Ready to resume domain execution now. */
- domctl.cmd = XEN_DOMCTL_resumedomain;
- domctl.domain = domid;
- rc = do_domctl(xch, &domctl);
-
-out:
-#if defined(__i386__) || defined(__x86_64__)
- if (p2m)
- munmap(p2m, dinfo->p2m_frames * PAGE_SIZE);
- if (shinfo)
- munmap(shinfo, PAGE_SIZE);
-#endif
-
- return rc;
-}
-
-/*
- * Resume execution of a domain after suspend shutdown.
- * This can happen in one of two ways:
- * 1. (fast=1) Resume the guest without resetting the domain environment.
- * The guest's call to SCHEDOP_shutdown(SHUTDOWN_suspend) will return 1.
- *
- * 2. (fast=0) Reset guest environment so it believes it is resumed in a new
- * domain context. The guest's call to SCHEDOP_shutdown(SHUTDOWN_suspend)
- * will return 0.
- *
- * (1) should only be used for guests which can handle the special return
- * code. Also note that the insertion of the return code is quite interesting
- * and that the guest MUST be paused - otherwise we would be corrupting
- * the guest vCPU state.
- *
- * (2) should be used only for guests which cannot handle the special
- * new return code - and it is always safe (but slower).
- */
-int xc_domain_resume(xc_interface *xch, uint32_t domid, int fast)
-{
- return (fast
- ? xc_domain_resume_cooperative(xch, domid)
- : xc_domain_resume_any(xch, domid));
-}
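For context, a hedged guest-side sketch of the contract modify_returncode()
relies on: SCHEDOP_shutdown(SHUTDOWN_suspend) returns 0 on a real resume in a
new domain context and non-zero when the suspend was cancelled (checkpoint).
HYPERVISOR_suspend() matches the Linux guest wrapper name; the two resume
helpers are assumptions, not code from this patch:

/* Hedged guest-kernel-style sketch of the suspend return-code contract. */
static void guest_suspend_path(unsigned long start_info_mfn)
{
    int cancelled = HYPERVISOR_suspend(start_info_mfn);

    if ( cancelled )
        resume_from_checkpoint();   /* assumed helper: reuse saved state */
    else
        resume_in_new_domain();     /* assumed helper: full re-setup */
}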
diff --git a/tools/libs/guest/Makefile b/tools/libs/guest/Makefile
index 6d2a1d5bbc..2a2323ff09 100644
--- a/tools/libs/guest/Makefile
+++ b/tools/libs/guest/Makefile
@@ -9,6 +9,7 @@ endif
SRCS-y += xg_private.c
SRCS-y += xg_domain.c
SRCS-y += xg_suspend.c
+SRCS-y += xg_resume.c
ifeq ($(CONFIG_MIGRATE),y)
SRCS-y += xg_sr_common.c
SRCS-$(CONFIG_X86) += xg_sr_common_x86.c
diff --git a/tools/libs/guest/xg_resume.c b/tools/libs/guest/xg_resume.c
new file mode 100644
index 0000000000..3bdefb2eef
--- /dev/null
+++ b/tools/libs/guest/xg_resume.c
@@ -0,0 +1,265 @@
+/*
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation;
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "xc_private.h"
+#include "xenguest.h"
+
+#if defined(__i386__) || defined(__x86_64__)
+
+#include <xen/foreign/x86_32.h>
+#include <xen/foreign/x86_64.h>
+#include <xen/hvm/params.h>
+#include "xc_core.h"
+
+static int modify_returncode(xc_interface *xch, uint32_t domid)
+{
+ vcpu_guest_context_any_t ctxt;
+ xc_dominfo_t info;
+ xen_capabilities_info_t caps;
+ struct domain_info_context _dinfo = {};
+ struct domain_info_context *dinfo = &_dinfo;
+ int rc;
+
+ if ( xc_domain_getinfo(xch, domid, 1, &info) != 1 ||
+ info.domid != domid )
+ {
+ PERROR("Could not get domain info");
+ return -1;
+ }
+
+ if ( !info.shutdown || (info.shutdown_reason != SHUTDOWN_suspend) )
+ {
+ ERROR("Dom %d not suspended: (shutdown %d, reason %d)", domid,
+ info.shutdown, info.shutdown_reason);
+ errno = EINVAL;
+ return -1;
+ }
+
+ if ( info.hvm )
+ {
+ /* HVM guests without PV drivers have no return code to modify. */
+ uint64_t irq = 0;
+ xc_hvm_param_get(xch, domid, HVM_PARAM_CALLBACK_IRQ, &irq);
+ if ( !irq )
+ return 0;
+
+ /* HVM guests have host address width. */
+ if ( xc_version(xch, XENVER_capabilities, &caps) != 0 )
+ {
+ PERROR("Could not get Xen capabilities");
+ return -1;
+ }
+ dinfo->guest_width = strstr(caps, "x86_64") ? 8 : 4;
+ }
+ else
+ {
+ /* Probe PV guest address width. */
+ if ( xc_domain_get_guest_width(xch, domid, &dinfo->guest_width) )
+ return -1;
+ }
+
+ if ( (rc = xc_vcpu_getcontext(xch, domid, 0, &ctxt)) != 0 )
+ return rc;
+
+ SET_FIELD(&ctxt, user_regs.eax, 1, dinfo->guest_width);
+
+ if ( (rc = xc_vcpu_setcontext(xch, domid, 0, &ctxt)) != 0 )
+ return rc;
+
+ return 0;
+}
+
+#else
+
+static int modify_returncode(xc_interface *xch, uint32_t domid)
+{
+ return 0;
+
+}
+
+#endif
+
+static int xc_domain_resume_cooperative(xc_interface *xch, uint32_t domid)
+{
+ DECLARE_DOMCTL;
+ int rc;
+
+ /*
+ * Set hypercall return code to indicate that suspend is cancelled
+ * (rather than resuming in a new domain context).
+ */
+ if ( (rc = modify_returncode(xch, domid)) != 0 )
+ return rc;
+
+ domctl.cmd = XEN_DOMCTL_resumedomain;
+ domctl.domain = domid;
+ return do_domctl(xch, &domctl);
+}
+
+#if defined(__i386__) || defined(__x86_64__)
+static int xc_domain_resume_hvm(xc_interface *xch, uint32_t domid)
+{
+ DECLARE_DOMCTL;
+
+ /*
+ * The domctl XEN_DOMCTL_resumedomain unpauses each vCPU. After
+ * the domctl, the guest will run.
+ *
+ * If it is PVHVM, the guest called the hypercall
+ * SCHEDOP_shutdown:SHUTDOWN_suspend
+ * to suspend itself. We don't modify the return code, so the PV driver
+ * will disconnect and reconnect.
+ *
+ * If it is plain HVM, the guest will simply continue running.
+ */
+ domctl.cmd = XEN_DOMCTL_resumedomain;
+ domctl.domain = domid;
+ return do_domctl(xch, &domctl);
+}
+#endif
+
+static int xc_domain_resume_any(xc_interface *xch, uint32_t domid)
+{
+ DECLARE_DOMCTL;
+ xc_dominfo_t info;
+ int i, rc = -1;
+#if defined(__i386__) || defined(__x86_64__)
+ struct domain_info_context _dinfo = { .guest_width = 0,
+ .p2m_size = 0 };
+ struct domain_info_context *dinfo = &_dinfo;
+ xen_pfn_t mfn, store_mfn, console_mfn;
+ vcpu_guest_context_any_t ctxt;
+ start_info_any_t *start_info;
+ shared_info_any_t *shinfo = NULL;
+ xen_pfn_t *p2m = NULL;
+#endif
+
+ if ( xc_domain_getinfo(xch, domid, 1, &info) != 1 )
+ {
+ PERROR("Could not get domain info");
+ return rc;
+ }
+
+ /*
+ * (x86 only) Rewrite store_mfn and console_mfn back to MFN (from PFN).
+ */
+#if defined(__i386__) || defined(__x86_64__)
+ if ( info.hvm )
+ return xc_domain_resume_hvm(xch, domid);
+
+ if ( xc_domain_get_guest_width(xch, domid, &dinfo->guest_width) != 0 )
+ {
+ PERROR("Could not get domain width");
+ return rc;
+ }
+
+ /* Map the shared info frame */
+ shinfo = xc_map_foreign_range(xch, domid, PAGE_SIZE,
+ PROT_READ, info.shared_info_frame);
+ if ( shinfo == NULL )
+ {
+ ERROR("Couldn't map shared info");
+ goto out;
+ }
+
+ /* Map the p2m list */
+ if ( xc_core_arch_map_p2m(xch, dinfo, &info, shinfo, &p2m) )
+ {
+ ERROR("Couldn't map p2m table");
+ goto out;
+ }
+
+ if ( xc_vcpu_getcontext(xch, domid, 0, &ctxt) )
+ {
+ ERROR("Could not get vcpu context");
+ goto out;
+ }
+
+ mfn = GET_FIELD(&ctxt, user_regs.edx, dinfo->guest_width);
+
+ start_info = xc_map_foreign_range(xch, domid, PAGE_SIZE,
+ PROT_READ | PROT_WRITE, mfn);
+ if ( start_info == NULL )
+ {
+ ERROR("Couldn't map start_info");
+ goto out;
+ }
+
+ store_mfn = GET_FIELD(start_info, store_mfn, dinfo->guest_width);
+ console_mfn = GET_FIELD(start_info, console.domU.mfn, dinfo->guest_width);
+ if ( dinfo->guest_width == 4 )
+ {
+ store_mfn = ((uint32_t *)p2m)[store_mfn];
+ console_mfn = ((uint32_t *)p2m)[console_mfn];
+ }
+ else
+ {
+ store_mfn = ((uint64_t *)p2m)[store_mfn];
+ console_mfn = ((uint64_t *)p2m)[console_mfn];
+ }
+ SET_FIELD(start_info, store_mfn, store_mfn, dinfo->guest_width);
+ SET_FIELD(start_info, console.domU.mfn, console_mfn, dinfo->guest_width);
+
+ munmap(start_info, PAGE_SIZE);
+#endif /* defined(__i386__) || defined(__x86_64__) */
+
+ /* Reset all secondary CPU states. */
+ for ( i = 1; i <= info.max_vcpu_id; i++ )
+ if ( xc_vcpu_setcontext(xch, domid, i, NULL) != 0 )
+ {
+ ERROR("Couldn't reset vcpu state");
+ goto out;
+ }
+
+ /* Ready to resume domain execution now. */
+ domctl.cmd = XEN_DOMCTL_resumedomain;
+ domctl.domain = domid;
+ rc = do_domctl(xch, &domctl);
+
+out:
+#if defined(__i386__) || defined(__x86_64__)
+ if (p2m)
+ munmap(p2m, dinfo->p2m_frames * PAGE_SIZE);
+ if (shinfo)
+ munmap(shinfo, PAGE_SIZE);
+#endif
+
+ return rc;
+}
+
+/*
+ * Resume execution of a domain after suspend shutdown.
+ * This can happen in one of two ways:
+ * 1. (fast=1) Resume the guest without resetting the domain environment.
+ * The guest's call to SCHEDOP_shutdown(SHUTDOWN_suspend) will return 1.
+ *
+ * 2. (fast=0) Reset guest environment so it believes it is resumed in a new
+ * domain context. The guest's call to SCHEDOP_shutdown(SHUTDOWN_suspend)
+ * will return 0.
+ *
+ * (1) should only be used for guests which can handle the special return
+ * code. Also note that the insertion of the return code is quite interesting
+ * and that the guest MUST be paused - otherwise we would be corrupting
+ * the guest vCPU state.
+ *
+ * (2) should be used only for guests which cannot handle the special
+ * new return code - and it is always safe (but slower).
+ */
+int xc_domain_resume(xc_interface *xch, uint32_t domid, int fast)
+{
+ return (fast
+ ? xc_domain_resume_cooperative(xch, domid)
+ : xc_domain_resume_any(xch, domid));
+}
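A final hedged sketch of the fast/slow choice documented above; the wrapper
and the guest_supports_cancel flag are illustrative assumptions:

#include <stdbool.h>
#include <xenctrl.h>
#include <xenguest.h>

/* Hypothetical wrapper: use cooperative resume (fast = 1) only when the
 * guest is known to handle the non-zero return from
 * SCHEDOP_shutdown(SHUTDOWN_suspend), e.g. during checkpointing;
 * otherwise use the always-safe full reset (fast = 0). */
static int resume_domain(xc_interface *xch, uint32_t domid,
                         bool guest_supports_cancel)
{
    return xc_domain_resume(xch, domid, guest_supports_cancel ? 1 : 0);
}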
--
generated by git-patchbot for /home/xen/git/xen.git#master