[Xen-devel] [PATCH 2/8] HVM save restore: new hypercall

Add a pair of hypercalls (XEN_DOMCTL_gethvmcontext / XEN_DOMCTL_sethvmcontext) for getting and setting HVM guest context, for use by HVM save/restore.

Signed-off-by: Zhai Edwin <edwin.zhai@xxxxxxxxx>
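
Two quick sketches of how the new interfaces are intended to be used (illustrative only, not part of this patch; the file handling in the first and the device/field names in the second are made up for the example).

On the tools side, a save path can pull the device-model state out with the new libxc wrapper and write the fixed-size blob into the image; restore does the reverse with xc_domain_hvm_setcontext():

    /* Sketch only -- not part of this patch. */
    #include <stdio.h>
    #include <stdint.h>
    #include "xenctrl.h"

    static int save_hvm_ctxt(int xc_handle, uint32_t domid, FILE *f)
    {
        hvm_domain_context_t hvm_ctxt;

        /* Issues XEN_DOMCTL_gethvmcontext under the hood. */
        if ( xc_domain_hvm_getcontext(xc_handle, domid, &hvm_ctxt) != 0 )
            return -1;

        /* The context is a fixed-size structure, so one write suffices. */
        if ( fwrite(&hvm_ctxt, sizeof(hvm_ctxt), 1, f) != 1 )
            return -1;

        return 0;
    }

Inside the hypervisor, each device model registers one save/load entry per instance at init time; hvm_save()/hvm_load() then walk that list. For a hypothetical timer device:

    /* Sketch only: "timer", my_timer_state and its count field are
     * invented names for illustration. */
    static void timer_save(hvm_domain_context_t *h, void *opaque)
    {
        struct my_timer_state *s = opaque;
        hvm_put_64u(h, s->count);      /* fields in a fixed byte order */
    }

    static int timer_load(hvm_domain_context_t *h, void *opaque, int version_id)
    {
        struct my_timer_state *s = opaque;
        if ( version_id != 1 )
            return -1;                 /* refuse unknown layouts */
        s->count = hvm_get_64u(h);
        return 0;
    }

    /* At device init time, e.g. alongside the existing *_init calls: */
    hvm_register_savevm(d, "timer", 0 /* instance */, 1 /* version */,
                        timer_save, timer_load, s);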

diff -r 7a38592d0cc9 tools/libxc/xc_domain.c
--- a/tools/libxc/xc_domain.c   Tue Jan 09 15:33:30 2007 +0800
+++ b/tools/libxc/xc_domain.c   Tue Jan 09 15:37:58 2007 +0800
@@ -233,6 +233,50 @@ int xc_domain_getinfolist(int xc_handle,
     unlock_pages(info, max_domains*sizeof(xc_domaininfo_t));
 
     return ret;
+}
+
+/* get info from hvm guest for save */
+int xc_domain_hvm_getcontext(int xc_handle,
+                             uint32_t domid,
+                             hvm_domain_context_t *hvm_ctxt)
+{
+    int rc;
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_gethvmcontext;
+    domctl.domain = (domid_t)domid;
+    set_xen_guest_handle(domctl.u.hvmcontext.ctxt, hvm_ctxt);
+
+    if ( (rc = mlock(hvm_ctxt, sizeof(*hvm_ctxt))) != 0 )
+        return rc;
+
+    rc = do_domctl(xc_handle, &domctl);
+
+    safe_munlock(hvm_ctxt, sizeof(*hvm_ctxt));
+
+    return rc;
+}
+
+/* set info to hvm guest for restore */
+int xc_domain_hvm_setcontext(int xc_handle,
+                             uint32_t domid,
+                             hvm_domain_context_t *hvm_ctxt)
+{
+    int rc;
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_sethvmcontext;
+    domctl.domain = (domid_t)domid;
+    set_xen_guest_handle(domctl.u.hvmcontext.ctxt, hvm_ctxt);
+
+    if ( (rc = mlock(hvm_ctxt, sizeof(*hvm_ctxt))) != 0 )
+        return rc;
+
+    rc = do_domctl(xc_handle, &domctl);
+
+    safe_munlock(hvm_ctxt, sizeof(*hvm_ctxt));
+
+    return rc;
 }
 
 int xc_vcpu_getcontext(int xc_handle,
diff -r 7a38592d0cc9 tools/libxc/xenctrl.h
--- a/tools/libxc/xenctrl.h     Tue Jan 09 15:33:30 2007 +0800
+++ b/tools/libxc/xenctrl.h     Tue Jan 09 15:37:58 2007 +0800
@@ -313,6 +313,30 @@ int xc_domain_getinfolist(int xc_handle,
                           xc_domaininfo_t *info);
 
 /**
+ * This function returns information about the context of an hvm domain
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm domid the domain to get information from
+ * @parm hvm_ctxt a pointer to a structure to store the execution context of the
+ *            hvm domain
+ * @return 0 on success, -1 on failure
+ */
+int xc_domain_hvm_getcontext(int xc_handle,
+                             uint32_t domid,
+                             hvm_domain_context_t *hvm_ctxt);
+
+/**
+ * This function sets the context of an hvm domain
+ *
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm domid the domain to set the hvm domain context for
+ * @parm hvm_ctxt pointer to the hvm context with the values to set
+ * @return 0 on success, -1 on failure
+ */
+int xc_domain_hvm_setcontext(int xc_handle,
+                             uint32_t domid,
+                             hvm_domain_context_t *hvm_ctxt);
+
+/**
  * This function returns information about the execution context of a
  * particular vcpu of a domain.
  *
diff -r 7a38592d0cc9 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c    Tue Jan 09 15:33:30 2007 +0800
+++ b/xen/arch/x86/hvm/hvm.c    Thu Jan 11 21:00:37 2007 +0800
@@ -149,11 +149,19 @@ int hvm_domain_initialise(struct domain 
 
 void hvm_domain_destroy(struct domain *d)
 {
+    HVMStateEntry *se, *dse;
     pit_deinit(d);
     rtc_deinit(d);
     pmtimer_deinit(d);
     hpet_deinit(d);
 
+    se = d->arch.hvm_domain.first_se;
+    while (se) {
+        dse = se;
+        se = se->next;
+        xfree(dse);
+    }
+
     if ( d->arch.hvm_domain.shared_page_va )
         unmap_domain_page_global(
             (void *)d->arch.hvm_domain.shared_page_va);
diff -r 7a38592d0cc9 xen/arch/x86/hvm/intercept.c
--- a/xen/arch/x86/hvm/intercept.c      Tue Jan 09 15:33:30 2007 +0800
+++ b/xen/arch/x86/hvm/intercept.c      Thu Jan 11 21:00:37 2007 +0800
@@ -29,6 +29,8 @@
 #include <asm/current.h>
 #include <io_ports.h>
 #include <xen/event.h>
+#include <xen/compile.h>
+#include <public/version.h>
 
 
 extern struct hvm_mmio_handler hpet_mmio_handler;
@@ -155,6 +157,235 @@ static inline void hvm_mmio_access(struc
     }
 }
 
+/* save/restore support */
+#define HVM_FILE_MAGIC   0x54381286
+#define HVM_FILE_VERSION 0x00000001
+
+int hvm_register_savevm(struct domain *d,
+                    const char *idstr,
+                    int instance_id,
+                    int version_id,
+                    SaveStateHandler *save_state,
+                    LoadStateHandler *load_state,
+                    void *opaque)
+{
+    HVMStateEntry *se, **pse;
+
+    if ( (se = xmalloc(struct HVMStateEntry)) == NULL ) {
+        printk("failed to allocate hvmstate entry.\n");
+        return -1;
+    }
+
+    strncpy(se->idstr, idstr, HVM_SE_IDSTR_LEN);
+
+    se->instance_id = instance_id;
+    se->version_id = version_id;
+    se->save_state = save_state;
+    se->load_state = load_state;
+    se->opaque = opaque;
+    se->next = NULL;
+
+    /* add at the end of list */
+    pse = &d->arch.hvm_domain.first_se;
+    while (*pse != NULL)
+        pse = &(*pse)->next;
+    *pse = se;
+    return 0;
+}
+
+int hvm_save(struct vcpu *v, hvm_domain_context_t *h)
+{
+    uint32_t len, len_pos, cur_pos;
+    uint32_t eax, ebx, ecx, edx;
+    HVMStateEntry *se;
+    char *chgset;
+
+    if (!is_hvm_vcpu(v)) {
+        printk("hvm_save only for hvm guest!\n");
+        return -1;
+    }
+
+    memset(h, 0, sizeof(hvm_domain_context_t));
+    hvm_put_32u(h, HVM_FILE_MAGIC);
+    hvm_put_32u(h, HVM_FILE_VERSION);
+
+    /* save xen changeset */
+    chgset = strrchr(XEN_CHANGESET, ' ') + 1;
+
+    len = strlen(chgset);
+    hvm_put_8u(h, len);
+    hvm_put_buffer(h, chgset, len);
+
+    /* save cpuid */
+    cpuid(1, &eax, &ebx, &ecx, &edx);
+    hvm_put_32u(h, eax);
+
+    for(se = v->domain->arch.hvm_domain.first_se; se != NULL; se = se->next) {
+        /* ID string */
+        len = strnlen(se->idstr, HVM_SE_IDSTR_LEN);
+        hvm_put_8u(h, len);
+        hvm_put_buffer(h, se->idstr, len);
+
+        hvm_put_32u(h, se->instance_id);
+        hvm_put_32u(h, se->version_id);
+
+        /* record size */
+        len_pos = hvm_ctxt_tell(h);
+        hvm_put_32u(h, 0);
+
+        se->save_state(h, se->opaque);
+
+        cur_pos = hvm_ctxt_tell(h);
+        len = cur_pos - len_pos - 4;
+        hvm_ctxt_seek(h, len_pos);
+        hvm_put_32u(h, len);
+        hvm_ctxt_seek(h, cur_pos);
+
+    }
+
+    h->size = hvm_ctxt_tell(h);
+    hvm_ctxt_seek(h, 0);
+
+    if (h->size >= HVM_CTXT_SIZE) {
+        printk("hvm_domain_context overflow when hvm_save! need %"PRId32" 
bytes for use.\n", h->size);
+        return -1;
+    }
+
+    return 0;
+
+}
+
+static HVMStateEntry *find_se(struct domain *d, const char *idstr, int instance_id)
+{
+    HVMStateEntry *se;
+
+    for(se = d->arch.hvm_domain.first_se; se != NULL; se = se->next) {
+        if (!strncmp(se->idstr, idstr, HVM_SE_IDSTR_LEN) &&
+            instance_id == se->instance_id){
+            return se;
+        }
+    }
+    return NULL;
+}
+
+int hvm_load(struct vcpu *v, hvm_domain_context_t *h)
+{
+    uint32_t len, rec_len, rec_pos, magic, instance_id, version_id;
+    uint32_t eax, ebx, ecx, edx;
+    HVMStateEntry *se;
+    char idstr[HVM_SE_IDSTR_LEN];
+    xen_changeset_info_t chgset;
+    char *cur_chgset;
+    int ret;
+
+    if (!is_hvm_vcpu(v)) {
+        printk("hvm_load only for hvm guest!\n");
+        return -1;
+    }
+
+    if (h->size >= HVM_CTXT_SIZE) {
+        printk("hvm_load fail! seems hvm_domain_context overflow when 
hvm_save! need %"PRId32" bytes.\n", h->size);
+        return -1;
+    }
+
+    hvm_ctxt_seek(h, 0);
+
+    magic = hvm_get_32u(h);
+    if (magic != HVM_FILE_MAGIC) {
+        printk("HVM restore magic dismatch!\n");
+        return -1;
+    }
+
+    magic = hvm_get_32u(h);
+    if (magic != HVM_FILE_VERSION) {
+        printk("HVM restore version dismatch!\n");
+        return -1;
+    }
+
+    /* check xen change set */
+    cur_chgset = strrchr(XEN_CHANGESET, ' ') + 1;
+
+    len = hvm_get_8u(h);
+    if (len > 20) { /* typical length is 18 -- "revision number:changeset id" */
+        printk("wrong changeset length %d in hvm restore!\n", len);
+        return -1;
+    }
+
+    hvm_get_buffer(h, chgset, len);
+    chgset[len] = '\0';
+    if (strncmp(cur_chgset, chgset, len + 1))
+        printk("warnings: try to restore hvm guest(%s) on a different 
changeset %s.\n",
+                chgset, cur_chgset);
+
+    /* check cpuid */
+    cpuid(1, &eax, &ebx, &ecx, &edx);
+    ebx = hvm_get_32u(h);
+    /* TODO: need to define how big a difference is acceptable */
+    if (ebx != eax)
+        printk("warnings: try to restore hvm guest(0x%"PRIx32") "
+               "on a different type processor(0x%"PRIx32").\n",
+                ebx,
+                eax);
+
+    while(1) {
+        if (hvm_ctxt_end(h)) {
+            break;
+        }
+
+        /* ID string */
+        len = hvm_get_8u(h);
+        if (len > HVM_SE_IDSTR_LEN) {
+            printk("wrong HVM save entry idstr len %d!", len);
+            return -1;
+        }
+
+        hvm_get_buffer(h, idstr, len);
+        idstr[len] = '\0';
+
+        instance_id = hvm_get_32u(h);
+        version_id = hvm_get_32u(h);
+
+        rec_len = hvm_get_32u(h);
+        rec_pos = hvm_ctxt_tell(h);
+
+        se = find_se(v->domain, idstr, instance_id);
+        if (se == NULL) {
+            printk("warnings: hvm load can't find device %s's instance %d!\n",
+                    idstr, instance_id);
+        } else {
+            ret = se->load_state(h, se->opaque, version_id);
+            if (ret < 0)
+                printk("warnings: loading state fail for device %s instance 
%d!\n",
+                        idstr, instance_id);
+        }
+
+
+        /* make sure to jump to the end of the record */
+        if ( hvm_ctxt_tell(h) - rec_pos != rec_len ) {
+            printk("wrong hvm record size; maybe a mismatch between save and restore handlers!\n");
+        }
+        hvm_ctxt_seek(h, rec_pos + rec_len);
+    }
+
+    return 0;
+}
+
+int arch_gethvm_ctxt(
+    struct vcpu *v, struct hvm_domain_context *c)
+{
+    if ( !is_hvm_vcpu(v) )
+        return -1;
+
+    return hvm_save(v, c);
+
+}
+
+int arch_sethvm_ctxt(
+        struct vcpu *v, struct hvm_domain_context *c)
+{
+    return hvm_load(v, c);
+}
+
 int hvm_buffered_io_intercept(ioreq_t *p)
 {
     struct vcpu *v = current;
diff -r 7a38592d0cc9 xen/common/domctl.c
--- a/xen/common/domctl.c       Tue Jan 09 15:33:30 2007 +0800
+++ b/xen/common/domctl.c       Thu Jan 11 21:01:31 2007 +0800
@@ -215,6 +215,39 @@ ret_t do_domctl(XEN_GUEST_HANDLE(xen_dom
     }
     break;
 
+    case XEN_DOMCTL_sethvmcontext:
+    { 
+        struct hvm_domain_context *c;
+        struct domain             *d;
+        struct vcpu               *v;
+
+        ret = -ESRCH;
+        if ( (d = find_domain_by_id(op->domain)) == NULL )
+            break;
+
+        ret = -ENOMEM;
+        if ( (c = xmalloc(struct hvm_domain_context)) == NULL )
+            goto sethvmcontext_out;
+
+        v = d->vcpu[0];
+        
+        ret = -EFAULT;
+
+#ifndef CONFIG_COMPAT
+        if ( copy_from_guest(c, op->u.hvmcontext.ctxt, 1) != 0 )
+            goto sethvmcontext_out;
+
+        ret = arch_sethvm_ctxt(v, c);
+#endif
+
+        xfree(c);
+
+    sethvmcontext_out:
+        put_domain(d);
+
+    }
+    break;
+
     case XEN_DOMCTL_pausedomain:
     {
         struct domain *d = find_domain_by_id(op->domain);
@@ -552,6 +585,46 @@ ret_t do_domctl(XEN_GUEST_HANDLE(xen_dom
     }
     break;
 
+    case XEN_DOMCTL_gethvmcontext:
+    { 
+        struct hvm_domain_context *c;
+        struct domain             *d;
+        struct vcpu               *v;
+
+        ret = -ESRCH;
+        if ( (d = find_domain_by_id(op->domain)) == NULL )
+            break;
+
+        ret = -ENOMEM;
+        if ( (c = xmalloc(struct hvm_domain_context)) == NULL )
+            goto gethvmcontext_out;
+
+        v = d->vcpu[0];
+
+        ret = -ENODATA;
+        if ( !test_bit(_VCPUF_initialised, &v->vcpu_flags) )
+            goto gethvmcontext_out;
+        
+        ret = 0;
+        if (arch_gethvm_ctxt(v, c) == -1)
+            ret = -EFAULT;
+
+#ifndef CONFIG_COMPAT
+        if ( copy_to_guest(op->u.hvmcontext.ctxt, c, 1) )
+            ret = -EFAULT;
+
+        xfree(c);
+#endif
+
+        if ( copy_to_guest(u_domctl, op, 1) )
+            ret = -EFAULT;
+
+    gethvmcontext_out:
+        put_domain(d);
+
+    }
+    break;
+
     case XEN_DOMCTL_getvcpuinfo:
     { 
         struct domain *d;
diff -r 7a38592d0cc9 xen/include/asm-x86/hvm/domain.h
--- a/xen/include/asm-x86/hvm/domain.h  Tue Jan 09 15:33:30 2007 +0800
+++ b/xen/include/asm-x86/hvm/domain.h  Tue Jan 09 15:37:58 2007 +0800
@@ -27,6 +27,20 @@
 #include <asm/hvm/io.h>
 #include <public/hvm/params.h>
 
+typedef void SaveStateHandler(hvm_domain_context_t *h, void *opaque);
+typedef int LoadStateHandler(hvm_domain_context_t *h, void *opaque, int version_id);
+
+#define HVM_SE_IDSTR_LEN 32
+typedef struct HVMStateEntry {
+    char idstr[HVM_SE_IDSTR_LEN];
+    int instance_id;
+    int version_id;
+    SaveStateHandler *save_state;
+    LoadStateHandler *load_state;
+    void *opaque;
+    struct HVMStateEntry *next;
+} HVMStateEntry;
+
 struct hvm_domain {
     unsigned long          shared_page_va;
     unsigned long          buffered_io_va;
@@ -44,6 +58,9 @@ struct hvm_domain {
     spinlock_t             pbuf_lock;
 
     uint64_t               params[HVM_NR_PARAMS];
+
+    struct hvm_domain_context *hvm_ctxt;
+    HVMStateEntry *first_se;
 };
 
 #endif /* __ASM_X86_HVM_DOMAIN_H__ */
diff -r 7a38592d0cc9 xen/include/asm-x86/hvm/support.h
--- a/xen/include/asm-x86/hvm/support.h Tue Jan 09 15:33:30 2007 +0800
+++ b/xen/include/asm-x86/hvm/support.h Thu Jan 11 21:00:37 2007 +0800
@@ -121,6 +121,131 @@ extern unsigned int opt_hvm_debug_level;
 #define TRACE_VMEXIT(index, value)                              \
     current->arch.hvm_vcpu.hvm_trace_values[index] = (value)
 
+/* save/restore support */
+
+//#define HVM_DEBUG_SUSPEND
+
+extern int hvm_register_savevm(struct domain *d,
+                    const char *idstr,
+                    int instance_id,
+                    int version_id,
+                    SaveStateHandler *save_state,
+                    LoadStateHandler *load_state,
+                    void *opaque);
+
+static inline void hvm_ctxt_seek(hvm_domain_context_t *h, unsigned int pos)
+{
+    h->cur = pos;
+}
+
+static inline uint32_t hvm_ctxt_tell(hvm_domain_context_t *h)
+{
+    return h->cur;
+}
+
+static inline int hvm_ctxt_end(hvm_domain_context_t *h)
+{
+    return (h->cur >= h->size || h->cur >= HVM_CTXT_SIZE);
+}
+
+static inline void hvm_put_byte(hvm_domain_context_t *h, unsigned int i)
+{
+    if (h->cur >= HVM_CTXT_SIZE) {
+        h->cur++;
+        return;
+    }
+    h->data[h->cur++] = (char)i;
+}
+
+static inline void hvm_put_8u(hvm_domain_context_t *h, uint8_t b)
+{
+    hvm_put_byte(h, b);
+}
+
+static inline void hvm_put_16u(hvm_domain_context_t *h, uint16_t b)
+{
+    hvm_put_8u(h, b >> 8);
+    hvm_put_8u(h, b);
+}
+
+static inline void hvm_put_32u(hvm_domain_context_t *h, uint32_t b)
+{
+    hvm_put_16u(h, b >> 16);
+    hvm_put_16u(h, b);
+}
+
+static inline void hvm_put_64u(hvm_domain_context_t *h, uint64_t b)
+{
+    hvm_put_32u(h, b >> 32);
+    hvm_put_32u(h, b);
+}
+
+static inline void hvm_put_buffer(hvm_domain_context_t *h, const char *buf, int len)
+{
+    memcpy(&h->data[h->cur], buf, len);
+    h->cur += len;
+}
+
+
+static inline char hvm_get_byte(hvm_domain_context_t *h)
+{
+    if (h->cur >= HVM_CTXT_SIZE) {
+        printk("hvm_get_byte overflow.\n");
+        return -1;
+    }
+
+    if (h->cur >= h->size) {
+        printk("hvm_get_byte exceed data area.\n");
+        return -1;
+    }
+
+    return h->data[h->cur++];
+}
+
+static inline uint8_t hvm_get_8u(hvm_domain_context_t *h)
+{
+    return hvm_get_byte(h);
+}
+
+static inline uint16_t hvm_get_16u(hvm_domain_context_t *h)
+{
+    uint16_t v;
+    v =  hvm_get_8u(h) << 8;
+    v |= hvm_get_8u(h);
+
+    return v;
+}
+
+static inline uint32_t hvm_get_32u(hvm_domain_context_t *h)
+{
+    uint32_t v;
+    v =  hvm_get_16u(h) << 16;
+    v |= hvm_get_16u(h);
+
+    return v;
+}
+
+static inline uint64_t hvm_get_64u(hvm_domain_context_t *h)
+{
+    uint64_t v;
+    v =  (uint64_t)hvm_get_32u(h) << 32;
+    v |= hvm_get_32u(h);
+
+    return v;
+}
+
+static inline void hvm_get_buffer(hvm_domain_context_t *h, char *buf, int len)
+{
+    memcpy(buf, &h->data[h->cur], len);
+    h->cur += len;
+}
+
+extern int hvm_save(struct vcpu*, hvm_domain_context_t *h);
+extern int hvm_load(struct vcpu*, hvm_domain_context_t *h);
+
+extern int arch_sethvm_ctxt(struct vcpu *v, struct hvm_domain_context *c);
+extern int arch_gethvm_ctxt(struct vcpu *v, struct hvm_domain_context *c);
+
 extern int hvm_enabled;
 
 int hvm_copy_to_guest_phys(paddr_t paddr, void *buf, int size);
diff -r 7a38592d0cc9 xen/include/public/domctl.h
--- a/xen/include/public/domctl.h       Tue Jan 09 15:33:30 2007 +0800
+++ b/xen/include/public/domctl.h       Tue Jan 09 15:37:58 2007 +0800
@@ -386,6 +386,21 @@ struct xen_domctl_settimeoffset {
 };
 typedef struct xen_domctl_settimeoffset xen_domctl_settimeoffset_t;
 DEFINE_XEN_GUEST_HANDLE(xen_domctl_settimeoffset_t);
+
+#define HVM_CTXT_SIZE        6144
+typedef struct hvm_domain_context {
+    uint32_t cur;
+    uint32_t size;
+    uint8_t data[HVM_CTXT_SIZE];
+} hvm_domain_context_t;
+DEFINE_XEN_GUEST_HANDLE(hvm_domain_context_t);
+
+#define XEN_DOMCTL_gethvmcontext   33
+#define XEN_DOMCTL_sethvmcontext   34
+typedef struct xen_domctl_hvmcontext {
+    XEN_GUEST_HANDLE(hvm_domain_context_t) ctxt;  /* IN/OUT */
+} xen_domctl_hvmcontext_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_hvmcontext_t);
 
 #define XEN_DOMCTL_real_mode_area     26
 struct xen_domctl_real_mode_area {
@@ -423,6 +438,7 @@ struct xen_domctl {
         struct xen_domctl_arch_setup        arch_setup;
         struct xen_domctl_settimeoffset     settimeoffset;
         struct xen_domctl_real_mode_area    real_mode_area;
+        struct xen_domctl_hvmcontext        hvmcontext;
         uint8_t                             pad[128];
     } u;
 };

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel