
[PATCH] fix compilation breakage caused by 17880:d3a87899985d (was Re: [Xen-devel] [PATCH] libxc: use vcpu_guest_context_either_t instead of vcpu_guest_context_t)



# HG changeset patch
# User Isaku Yamahata <yamahata@xxxxxxxxxxxxx>
# Date 1213932168 -32400
# Node ID 97de6df3fddc2747ff159df0ed091bac909765a6
# Parent  d3a87899985d8f0c91aed55ff05451aee3f973b9
[IA64] fix compilation breakage caused by 17880:d3a87899985d

Signed-off-by: Isaku Yamahata <yamahata@xxxxxxxxxxxxx>

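For context: 17880:d3a87899985d switched the libxc vcpu-context calls over to a
union type, which is what broke the ia64 tools build. A rough sketch of the
shape involved follows (sketch only, member list abridged from memory; the
real definition lives in tools/libxc/xenctrl.h):

    /* Sketch, not the authoritative definition -- the x86 variants are
     * omitted here.  The .c member is the native vcpu_guest_context_t,
     * which is why the hunks below can keep a field-access pointer via
     * ctxt = &ctxt_any.c while passing &ctxt_any to the libxc calls. */
    typedef union {
        vcpu_guest_context_t c;    /* native layout, at offset 0 */
    } vcpu_guest_context_any_t;

    /* The pattern applied throughout this patch: */
    vcpu_guest_context_any_t ctxt_any;
    vcpu_guest_context_t *ctxt = &ctxt_any.c;
    if (xc_vcpu_getcontext(xc_handle, domid, vcpu, &ctxt_any) < 0)
        perror("xc_vcpu_getcontext");
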
diff --git a/tools/debugger/xenitp/xenitp.c b/tools/debugger/xenitp/xenitp.c
--- a/tools/debugger/xenitp/xenitp.c
+++ b/tools/debugger/xenitp/xenitp.c
@@ -57,6 +57,16 @@ static int cur_vcpu;
 #define CFM_SOF_MASK            0x3f
 
 int virt_to_phys (int is_inst, unsigned long vaddr, unsigned long *paddr);
+
+/* Wrapper: cast vcpu_guest_context_t to vcpu_guest_context_any_t for the new API. */
+static int xc_ia64_vcpu_getcontext(int xc_handle,
+                                   uint32_t domid,
+                                   uint32_t vcpu,
+                                   vcpu_guest_context_t *ctxt)
+{
+    return xc_vcpu_getcontext(xc_handle, domid, vcpu,
+                              (vcpu_guest_context_any_t *)ctxt);
+}
 
 static inline unsigned int ctx_slot (vcpu_guest_context_t *ctx)
 {
@@ -729,7 +739,7 @@ int wait_domain (int vcpu, vcpu_guest_co
         fflush (stdout);
         nanosleep (&ts, NULL);
     }
-    return xc_vcpu_getcontext (xc_handle, domid, vcpu, ctx);
+    return xc_ia64_vcpu_getcontext (xc_handle, domid, vcpu, ctx);
 }
 
 int virt_to_phys (int is_inst, unsigned long vaddr, unsigned long *paddr)
@@ -945,13 +955,13 @@ char *parse_arg (char **buf)
     return res;
 }
 
-vcpu_guest_context_t vcpu_ctx[MAX_VIRT_CPUS];
+vcpu_guest_context_any_t vcpu_ctx_any[MAX_VIRT_CPUS];
 
 int vcpu_setcontext (int vcpu)
 {
     int ret;
 
-    ret = xc_vcpu_setcontext (xc_handle, domid, vcpu, &vcpu_ctx[vcpu]);
+    ret = xc_vcpu_setcontext (xc_handle, domid, vcpu, &vcpu_ctx_any[vcpu]);
     if (ret < 0)
         perror ("xc_vcpu_setcontext");
 
@@ -1518,7 +1528,7 @@ enum cmd_status do_command (int vcpu, ch
     int flag_ambiguous;
 
     cur_vcpu = vcpu;
-    cur_ctx = &vcpu_ctx[vcpu];
+    cur_ctx = &vcpu_ctx_any[vcpu].c;
 
     /* Handle repeat last-command.  */
     if (*line == 0) {
@@ -1575,7 +1585,7 @@ void xenitp (int vcpu)
     int ret;
     struct sigaction sa;
 
-    cur_ctx = &vcpu_ctx[vcpu];
+    cur_ctx = &vcpu_ctx_any[vcpu].c;
 
     xc_handle = xc_interface_open (); /* for accessing control interface */
 
@@ -1588,9 +1598,9 @@ void xenitp (int vcpu)
         exit (-1);
     }
 
-    ret = xc_vcpu_getcontext (xc_handle, domid, vcpu, cur_ctx);
+    ret = xc_ia64_vcpu_getcontext (xc_handle, domid, vcpu, cur_ctx);
     if (ret < 0) {
-        perror ("xc_vcpu_getcontext");
+        perror ("xc_ia64_vcpu_getcontext");
         exit (-1);
     }
 
diff --git a/tools/libxc/ia64/xc_ia64_hvm_build.c b/tools/libxc/ia64/xc_ia64_hvm_build.c
--- a/tools/libxc/ia64/xc_ia64_hvm_build.c
+++ b/tools/libxc/ia64/xc_ia64_hvm_build.c
@@ -1052,7 +1052,8 @@ int
 int
 xc_hvm_build(int xc_handle, uint32_t domid, int memsize, const char *image_name)
 {
-    vcpu_guest_context_t st_ctxt, *ctxt = &st_ctxt;
+    vcpu_guest_context_any_t st_ctxt_any;
+    vcpu_guest_context_t *ctxt = &st_ctxt_any.c;
     char *image = NULL;
     unsigned long image_size;
     unsigned long nr_pages;
@@ -1079,14 +1080,14 @@ xc_hvm_build(int xc_handle, uint32_t dom
 
     free(image);
 
-    memset(ctxt, 0, sizeof(*ctxt));
+    memset(&st_ctxt_any, 0, sizeof(st_ctxt_any));
     ctxt->regs.ip = 0x80000000ffffffb0UL;
     ctxt->regs.ar.fpsr = xc_ia64_fpsr_default();
     ctxt->regs.cr.itir = 14 << 2;
     ctxt->regs.psr = IA64_PSR_AC | IA64_PSR_BN;
     ctxt->regs.cr.dcr = 0;
     ctxt->regs.cr.pta = 15 << 2;
-    return xc_vcpu_setcontext(xc_handle, domid, 0, ctxt);
+    return xc_vcpu_setcontext(xc_handle, domid, 0, &st_ctxt_any);
 
 error_out:
     free(image);
diff --git a/tools/libxc/ia64/xc_ia64_linux_restore.c b/tools/libxc/ia64/xc_ia64_linux_restore.c
--- a/tools/libxc/ia64/xc_ia64_linux_restore.c
+++ b/tools/libxc/ia64/xc_ia64_linux_restore.c
@@ -117,8 +117,9 @@ xc_ia64_recv_unallocated_list(int xc_han
 
 static int
 xc_ia64_recv_vcpu_context(int xc_handle, int io_fd, uint32_t dom,
-                          uint32_t vcpu, vcpu_guest_context_t *ctxt)
+                          uint32_t vcpu, vcpu_guest_context_any_t *ctxt_any)
 {
+    vcpu_guest_context_t *ctxt = &ctxt_any->c;
     if (read_exact(io_fd, ctxt, sizeof(*ctxt))) {
         ERROR("Error when reading ctxt");
         return -1;
@@ -128,14 +129,14 @@ xc_ia64_recv_vcpu_context(int xc_handle,
 
     /* Initialize and set registers.  */
     ctxt->flags = VGCF_EXTRA_REGS | VGCF_SET_CR_IRR;
-    if (xc_vcpu_setcontext(xc_handle, dom, vcpu, ctxt) != 0) {
+    if (xc_vcpu_setcontext(xc_handle, dom, vcpu, ctxt_any) != 0) {
         ERROR("Couldn't set vcpu context");
         return -1;
     }
 
     /* Just a check.  */
     ctxt->flags = 0;
-    if (xc_vcpu_getcontext(xc_handle, dom, vcpu, ctxt)) {
+    if (xc_vcpu_getcontext(xc_handle, dom, vcpu, ctxt_any)) {
         ERROR("Could not get vcpu context");
         return -1;
     }
@@ -195,22 +196,23 @@ xc_ia64_pv_recv_context(int xc_handle, i
     unsigned long gmfn;
 
     /* A copy of the CPU context of the guest. */
-    vcpu_guest_context_t ctxt;
+    vcpu_guest_context_any_t ctxt_any;
+    vcpu_guest_context_t *ctxt = &ctxt_any.c;
 
     /* A temporary mapping of the guest's start_info page. */
     start_info_t *start_info;
 
-    if (lock_pages(&ctxt, sizeof(ctxt))) {
+    if (lock_pages(&ctxt_any, sizeof(ctxt_any))) {
         /* needed for build domctl, but might as well do early */
         ERROR("Unable to lock_pages ctxt");
         return -1;
     }
 
-    if (xc_ia64_recv_vcpu_context(xc_handle, io_fd, dom, 0, &ctxt))
+    if (xc_ia64_recv_vcpu_context(xc_handle, io_fd, dom, 0, &ctxt_any))
         goto out;
 
     /* Then get privreg page.  */
-    if (read_page(xc_handle, io_fd, dom, ctxt.privregs_pfn) < 0) {
+    if (read_page(xc_handle, io_fd, dom, ctxt->privregs_pfn) < 0) {
         ERROR("Could not read vcpu privregs");
         goto out;
     }
@@ -243,7 +245,7 @@ xc_ia64_pv_recv_context(int xc_handle, i
     rc = 0;
 
  out:
-    unlock_pages(&ctxt, sizeof(ctxt));
+    unlock_pages(&ctxt_any, sizeof(ctxt_any));
     return rc;
 }
 
@@ -314,12 +316,12 @@ xc_ia64_hvm_recv_context(int xc_handle, 
     /* vcpu context */
     for (i = 0; i <= info.max_vcpu_id; i++) {
         /* A copy of the CPU context of the guest. */
-        vcpu_guest_context_t ctxt;
+        vcpu_guest_context_any_t ctxt_any;
 
         if (!__test_bit(i, vcpumap))
             continue;
 
-        if (xc_ia64_recv_vcpu_context(xc_handle, io_fd, dom, i, &ctxt))
+        if (xc_ia64_recv_vcpu_context(xc_handle, io_fd, dom, i, &ctxt_any))
             goto out;
 
        // system context of vcpu is received as hvm context.
diff --git a/tools/libxc/ia64/xc_ia64_linux_save.c b/tools/libxc/ia64/xc_ia64_linux_save.c
--- a/tools/libxc/ia64/xc_ia64_linux_save.c
+++ b/tools/libxc/ia64/xc_ia64_linux_save.c
@@ -180,9 +180,10 @@ xc_ia64_send_unallocated_list(int xc_han
 
 static int
 xc_ia64_send_vcpu_context(int xc_handle, int io_fd, uint32_t dom,
-                          uint32_t vcpu, vcpu_guest_context_t *ctxt)
+                          uint32_t vcpu, vcpu_guest_context_any_t *ctxt_any)
 {
-    if (xc_vcpu_getcontext(xc_handle, dom, vcpu, ctxt)) {
+    vcpu_guest_context_t *ctxt = &ctxt_any->c;
+    if (xc_vcpu_getcontext(xc_handle, dom, vcpu, ctxt_any)) {
         ERROR("Could not get vcpu context");
         return -1;
     }
@@ -211,14 +212,15 @@ xc_ia64_pv_send_context(int xc_handle, i
                         shared_info_t *live_shinfo)
 {
     /* A copy of the CPU context of the guest. */
-    vcpu_guest_context_t ctxt;
+    vcpu_guest_context_any_t ctxt_any;
+    vcpu_guest_context_t *ctxt = &ctxt_any.c;
     char *mem;
 
-    if (xc_ia64_send_vcpu_context(xc_handle, io_fd, dom, 0, &ctxt))
+    if (xc_ia64_send_vcpu_context(xc_handle, io_fd, dom, 0, &ctxt_any))
         return -1;
 
     mem = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
-                               PROT_READ|PROT_WRITE, ctxt.privregs_pfn);
+                               PROT_READ|PROT_WRITE, ctxt->privregs_pfn);
     if (mem == NULL) {
         ERROR("cannot map privreg page");
         return -1;
@@ -297,12 +299,12 @@ xc_ia64_hvm_send_context(int xc_handle, 
     /* vcpu context */
     for (i = 0; i <= info->max_vcpu_id; i++) {
         /* A copy of the CPU context of the guest. */
-        vcpu_guest_context_t ctxt;
+        vcpu_guest_context_any_t ctxt_any;
 
         if (!__test_bit(i, vcpumap))
             continue;
 
-        if (xc_ia64_send_vcpu_context(xc_handle, io_fd, dom, i, &ctxt))
+        if (xc_ia64_send_vcpu_context(xc_handle, io_fd, dom, i, &ctxt_any))
             goto out;
 
         // system context of vcpu is sent as hvm context.
diff --git a/tools/libxc/xc_core_ia64.c b/tools/libxc/xc_core_ia64.c
--- a/tools/libxc/xc_core_ia64.c
+++ b/tools/libxc/xc_core_ia64.c
@@ -308,9 +308,10 @@ xc_core_arch_context_free(struct xc_core
 
 int
 xc_core_arch_context_get(struct xc_core_arch_context* arch_ctxt,
-                         vcpu_guest_context_t* ctxt,
+                         vcpu_guest_context_any_t* ctxt_any,
                          int xc_handle, uint32_t domid)
 {
+    vcpu_guest_context_t *ctxt = &ctxt_any->c;
     mapped_regs_t* mapped_regs;
 
     if ( ctxt->privregs_pfn == VGC_PRIVREGS_HVM )
diff --git a/tools/libxc/xc_core_ia64.h b/tools/libxc/xc_core_ia64.h
--- a/tools/libxc/xc_core_ia64.h
+++ b/tools/libxc/xc_core_ia64.h
@@ -40,7 +40,7 @@ xc_core_arch_context_free(struct xc_core
 xc_core_arch_context_free(struct xc_core_arch_context* arch_ctxt);
 int
 xc_core_arch_context_get(struct xc_core_arch_context* arch_ctxt,
-                         vcpu_guest_context_t* ctxt,
+                         vcpu_guest_context_any_t* ctxt,
                          int xc_handle, uint32_t domid);
 int
 xc_core_arch_context_get_shdr(struct xc_core_arch_context* arch_ctxt, 

-- 
yamahata
