[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH v3 10/10] xen/arm: Implement toolstack for xl restore/save and migrate



From: Alexey Sokolov <sokolov.a@xxxxxxxxxxx>

Implement support for the xl restore/save operations (which are also used
for migrate) in xc_arm_migrate.c and make it compilable.
The overall process of save is the following:
1) save guest parameters (i.e., memory map, console and store pfn, etc)
2) save memory (if it is live, perform dirty-page tracing)
3) save hvm states (i.e., gic, timer, etc)
4) save vcpu registers (i.e., pc, sp, lr, etc)
The overall process of restore is the same as the one in save.

Signed-off-by: Alexey Sokolov <sokolov.a@xxxxxxxxxxx>
---
 config/arm32.mk              |   1 +
 tools/libxc/Makefile         |   5 +
 tools/libxc/xc_arm_migrate.c | 686 +++++++++++++++++++++++++++++++++++++++++++
 tools/misc/Makefile          |   4 +
 4 files changed, 696 insertions(+)
 create mode 100644 tools/libxc/xc_arm_migrate.c

diff --git a/config/arm32.mk b/config/arm32.mk
index 8e21158..0100ee2 100644
--- a/config/arm32.mk
+++ b/config/arm32.mk
@@ -1,6 +1,7 @@
 CONFIG_ARM := y
 CONFIG_ARM_32 := y
 CONFIG_ARM_$(XEN_OS) := y
+CONFIG_MIGRATE := y
 
 CONFIG_XEN_INSTALL_SUFFIX :=
 
diff --git a/tools/libxc/Makefile b/tools/libxc/Makefile
index 512a994..05dfef4 100644
--- a/tools/libxc/Makefile
+++ b/tools/libxc/Makefile
@@ -42,8 +42,13 @@ CTRL_SRCS-$(CONFIG_MiniOS) += xc_minios.c
 GUEST_SRCS-y :=
 GUEST_SRCS-y += xg_private.c xc_suspend.c
 ifeq ($(CONFIG_MIGRATE),y)
+ifeq ($(CONFIG_X86),y)
 GUEST_SRCS-y += xc_domain_restore.c xc_domain_save.c
 GUEST_SRCS-y += xc_offline_page.c xc_compression.c
+endif
+ifeq ($(CONFIG_ARM),y)
+GUEST_SRCS-y += xc_arm_migrate.c
+endif
 else
 GUEST_SRCS-y += xc_nomigrate.c
 endif
diff --git a/tools/libxc/xc_arm_migrate.c b/tools/libxc/xc_arm_migrate.c
new file mode 100644
index 0000000..9f642f3
--- /dev/null
+++ b/tools/libxc/xc_arm_migrate.c
@@ -0,0 +1,686 @@
+/******************************************************************************
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation;
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301 USA
+ *
+ * Copyright (c) 2013, Samsung Electronics
+ */
+
+#include <inttypes.h>
+#include <errno.h>
+#include <xenctrl.h>
+#include <xenguest.h>
+
+#include <unistd.h>
+#include <xc_private.h>
+#include <xc_dom.h>
+#include "xc_bitops.h"
+#include "xg_private.h"
+
+#define DEF_MAX_ITERS   29        /* limit us to 30 times round loop   */
+#define DEF_MAX_FACTOR   3        /* never send more than 3x p2m_size  */
+#define DEF_MIN_DIRTY_PER_ITER 50 /* dirty page count to define last iter */
+#define DEF_PROGRESS_RATE 50      /* progress bar update rate */
+
+/*
+ * Guest params to save: used HVM params, save flags, memory map
+ */
+typedef struct guest_params {
+    unsigned long console_pfn;   /* guest pfn of the console ring page */
+    unsigned long store_pfn;     /* guest pfn of the xenstore ring page */
+    uint32_t flags;              /* XCFLAGS_* (LIVE, DEBUG, ...) from the sender */
+    uint32_t mem_map_nr_entries; /* NOTE(review): never assigned or read back
+                                  * on restore -- confirm whether it is needed */
+    struct dt_mem_info memmap; /* Memory map */
+} guest_params_t;
+
+/*
+ * Ask the caller-supplied hook to suspend the domain, then verify that
+ * the domain really reached the suspended shutdown state.
+ * Returns 0 on success, -1 on failure.
+ */
+static int suspend_and_state(int (*suspend)(void*), void *data,
+                             xc_interface *xch, int dom)
+{
+    xc_dominfo_t info;
+
+    if ( !(*suspend)(data) )
+    {
+        ERROR("Suspend request failed");
+        return -1;
+    }
+
+    /*
+     * xc_domain_getinfo() returns information for the first existing
+     * domain with ID >= dom, so the domid must be checked explicitly in
+     * addition to the shutdown state (matches the x86 save path).
+     */
+    if ( (xc_domain_getinfo(xch, dom, 1, &info) != 1) ||
+         (info.domid != dom) ||
+         !info.shutdown || (info.shutdown_reason != SHUTDOWN_suspend) )
+    {
+        ERROR("Domain is not in suspended state after suspend attempt");
+        return -1;
+    }
+
+    return 0;
+}
+
+/* Wrapper around write_exact() which logs a diagnostic on failure. */
+static int write_exact_handled(xc_interface *xch, int fd, const void *data,
+                               size_t size)
+{
+    int failed = write_exact(fd, data, size);
+
+    if ( failed )
+        ERROR("Write failed, check space");
+
+    return failed ? -1 : 0;
+}
+
+/* ============ Memory ============= */
+/*
+ * Send the guest's RAM over io_fd.  For a live migration this iterates
+ * with log-dirty tracing until the dirty set stops shrinking (or the
+ * iteration / volume caps are hit), then suspends the domain and sends
+ * the final dirty pages.  Returns 0 on success, -1 on error.
+ *
+ * Wire format: <end pfn> then a sequence of (flag=1, pfn, page) records
+ * terminated by a zero flag byte; restore_memory() is the reader.
+ */
+static int save_memory(xc_interface *xch, int io_fd, uint32_t dom,
+                       struct save_callbacks *callbacks,
+                       uint32_t max_iters, uint32_t max_factor,
+                       guest_params_t *params)
+{
+    int live = !!(params->flags & XCFLAGS_LIVE);
+    int debug = !!(params->flags & XCFLAGS_DEBUG);
+    const char zero = 0;
+    char reportbuf[80];
+    int iter = 0;
+    int last_iter = !live;
+    int total_dirty_pages_num = 0;
+    int dirty_pages_on_prev_iter_num = 0;
+    int rc = -1;
+
+    DECLARE_HYPERCALL_BUFFER(unsigned long, to_send);
+
+    /* We suppose that guest's memory base is the first region base */
+    xen_pfn_t start = (params->memmap.bank[0].start >> PAGE_SHIFT);
+    const xen_pfn_t end = xc_domain_maximum_gpfn(xch, dom);
+    const xen_pfn_t mem_size = end - start;
+    xen_pfn_t i;
+
+    if ( write_exact_handled(xch, io_fd, &end, sizeof(xen_pfn_t)) )
+        return -1;
+
+    if ( live )
+    {
+        if ( xc_shadow_control(xch, dom, XEN_DOMCTL_SHADOW_OP_ENABLE_LOGDIRTY,
+                    NULL, 0, NULL, 0, NULL) < 0 )
+        {
+            ERROR("Couldn't enable log-dirty mode !\n");
+            return -1;
+        }
+        if ( debug )
+            IPRINTF("Log-dirty mode enabled!\n");
+
+        max_iters  = max_iters  ? : DEF_MAX_ITERS;
+        max_factor = max_factor ? : DEF_MAX_FACTOR;
+    }
+
+    to_send = xc_hypercall_buffer_alloc_pages(xch, to_send,
+                                              NRPAGES(bitmap_size(mem_size)));
+    if ( !to_send )
+    {
+        ERROR("Couldn't allocate to_send array!\n");
+        goto out; /* log-dirty mode still needs switching off */
+    }
+
+    /* send all pages on first iter */
+    memset(to_send, 0xff, bitmap_size(mem_size));
+
+    for ( ; ; )
+    {
+        int dirty_pages_on_current_iter_num = 0;
+        int frc;
+        iter++;
+
+        snprintf(reportbuf, sizeof(reportbuf),
+                 "Saving memory: iter %d (last sent %u)",
+                 iter, dirty_pages_on_prev_iter_num);
+
+        xc_report_progress_start(xch, reportbuf, mem_size);
+
+        /* Decide whether this must be the final (suspended) iteration. */
+        if ( (iter > 1 &&
+              dirty_pages_on_prev_iter_num < DEF_MIN_DIRTY_PER_ITER) ||
+             (iter == max_iters) ||
+             (total_dirty_pages_num >= mem_size*max_factor) )
+        {
+            if ( debug )
+                IPRINTF("Last iteration");
+            last_iter = 1;
+        }
+
+        if ( last_iter )
+        {
+            if ( suspend_and_state(callbacks->suspend, callbacks->data,
+                                   xch, dom) )
+            {
+                ERROR("Domain appears not to have suspended");
+                goto out; /* original leaked to_send here */
+            }
+        }
+        if ( live && iter > 1 )
+        {
+            /* Fetch and atomically clear the dirty bitmap for this round. */
+            frc = xc_shadow_control(xch, dom, XEN_DOMCTL_SHADOW_OP_CLEAN,
+                                    HYPERCALL_BUFFER(to_send), mem_size,
+                                                     NULL, 0, NULL);
+            if ( frc != mem_size )
+            {
+                ERROR("Error peeking shadow bitmap");
+                goto out;
+            }
+        }
+
+        for ( i = start; i < end; ++i )
+        {
+            if ( test_bit(i - start, to_send) )
+            {
+                const char one = 1;
+                char *page = xc_map_foreign_range(xch, dom, PAGE_SIZE,
+                                                  PROT_READ | PROT_WRITE, i);
+                if ( !page )
+                {
+                    /* xen_pfn_t width varies; cast to match %llx */
+                    PERROR("xc_map_foreign_range failed, pfn=%llx",
+                           (unsigned long long)i);
+                    goto out; /* original leaked to_send here */
+                }
+
+                if ( write_exact_handled(xch, io_fd, &one, 1) ||
+                     write_exact_handled(xch, io_fd, &i, sizeof(i)) ||
+                     write_exact_handled(xch, io_fd, page, PAGE_SIZE) )
+                {
+                    munmap(page, PAGE_SIZE);
+                    goto out;
+                }
+                munmap(page, PAGE_SIZE);
+
+                if ( (i % DEF_PROGRESS_RATE) == 0 )
+                    xc_report_progress_step(xch, i - start, mem_size);
+                dirty_pages_on_current_iter_num++;
+            }
+        }
+
+        if ( debug )
+            IPRINTF("Dirty pages=%d", dirty_pages_on_current_iter_num);
+        xc_report_progress_step(xch, mem_size, mem_size);
+
+        dirty_pages_on_prev_iter_num = dirty_pages_on_current_iter_num;
+        total_dirty_pages_num += dirty_pages_on_current_iter_num;
+
+        if ( last_iter )
+            break;
+    }
+
+    /* Zero flag byte terminates the page stream. */
+    rc = write_exact_handled(xch, io_fd, &zero, 1);
+
+out:
+    /* Common cleanup: runs on both success and every error path. */
+    if ( to_send )
+        xc_hypercall_buffer_free_pages(xch, to_send,
+                                       NRPAGES(bitmap_size(mem_size)));
+    if ( live &&
+         xc_shadow_control(xch, dom, XEN_DOMCTL_SHADOW_OP_OFF,
+                           NULL, 0, NULL, 0, NULL) < 0 )
+        ERROR("Couldn't disable log-dirty mode");
+
+    return rc;
+}
+
+/*
+ * Populate the new domain's RAM and fill it from the incoming stream.
+ * The stream layout is produced by save_memory(): <end pfn> then
+ * (flag=1, pfn, page) records terminated by a zero flag byte.
+ * Returns 0 on success, -1 on error.
+ */
+static int restore_memory(xc_interface *xch, int io_fd, uint32_t dom,
+                          guest_params_t *params)
+{
+    xen_pfn_t end;
+    xen_pfn_t gpfn;
+
+    /* We suppose that guest's memory base is the first region base */
+    xen_pfn_t start = (params->memmap.bank[0].start >> PAGE_SHIFT);
+
+    if ( read_exact(io_fd, &end, sizeof(xen_pfn_t)) )
+    {
+        PERROR("First read of incoming memory failed");
+        return -1;
+    }
+
+    /* TODO allocate several pages per call */
+    for ( gpfn = start; gpfn < end; ++gpfn )
+    {
+        if ( xc_domain_populate_physmap_exact(xch, dom, 1, 0, 0, &gpfn) )
+        {
+            PERROR("Memory allocation for a new domain failed");
+            return -1;
+        }
+    }
+    while ( 1 )
+    {
+        char new_page;
+        xen_pfn_t pfn; /* renamed: previously shadowed the outer gpfn */
+        char *page;
+
+        if ( read_exact(io_fd, &new_page, 1) )
+        {
+            PERROR("End-checking flag read failed during memory transfer");
+            return -1;
+        }
+        if ( !new_page )
+            break;
+
+        if ( read_exact(io_fd, &pfn, sizeof(pfn)) )
+        {
+            PERROR("GPFN read failed during memory transfer");
+            return -1;
+        }
+        /* The pfn comes from an untrusted stream: bound-check it. */
+        if ( pfn < start || pfn >= end )
+        {
+            ERROR("GPFN %llx doesn't belong to RAM address space",
+                  (unsigned long long)pfn);
+            return -1;
+        }
+        page = xc_map_foreign_range(xch, dom, PAGE_SIZE,
+                                    PROT_READ | PROT_WRITE, pfn);
+        if ( !page )
+        {
+            PERROR("xc_map_foreign_range failed, pfn=%llx",
+                   (unsigned long long)pfn);
+            return -1;
+        }
+        if ( read_exact(io_fd, page, PAGE_SIZE) )
+        {
+            PERROR("Page data read failed during memory transfer");
+            munmap(page, PAGE_SIZE); /* was leaked on this error path */
+            return -1;
+        }
+        munmap(page, PAGE_SIZE);
+    }
+
+    return 0;
+}
+
+/* ============ HVM context =========== */
+/*
+ * Save the HVM (GIC, timer, ...) context: a uint32_t record size
+ * followed by the raw context blob from Xen.
+ * Returns 0 on success, -1 on error.
+ */
+static int save_armhvm(xc_interface *xch, int io_fd, uint32_t dom, int debug)
+{
+    uint8_t *hvm_buf = NULL;
+    /* int, not uint32_t: xc_domain_hvm_getcontext() returns -1 on error */
+    int hvm_buf_size;
+    int ret;
+    uint32_t rec_size;
+    int retval = -1;
+
+    /* Ask Xen how much space the HVM context needs. */
+    hvm_buf_size = xc_domain_hvm_getcontext(xch, dom, 0, 0);
+    if ( hvm_buf_size == -1 )
+    {
+        ERROR("Couldn't get HVM context size from Xen");
+        goto out;
+    }
+
+    hvm_buf = malloc(hvm_buf_size);
+    if ( !hvm_buf )
+    {
+        ERROR("Couldn't allocate memory for hvm buffer");
+        goto out;
+    }
+
+    /* Get HVM context from Xen and save it too */
+    ret = xc_domain_hvm_getcontext(xch, dom, hvm_buf, hvm_buf_size);
+    if ( ret == -1 )
+    {
+        ERROR("HVM:Could not get hvm buffer");
+        goto out;
+    }
+    rec_size = ret;
+
+    if ( debug )
+        IPRINTF("HVM save size %d %u", hvm_buf_size, rec_size);
+
+    if ( write_exact_handled(xch, io_fd, &rec_size, sizeof(uint32_t)) ||
+         write_exact_handled(xch, io_fd, hvm_buf, rec_size) )
+        goto out;
+
+    retval = 0;
+
+out:
+    free(hvm_buf); /* free(NULL) is a no-op; no guard needed */
+    return retval;
+}
+
+/*
+ * Restore the HVM context saved by save_armhvm().
+ * Returns 0 on success, -1 on error.
+ */
+static int restore_armhvm(xc_interface *xch, int io_fd,
+                          uint32_t dom, int debug)
+{
+    uint32_t rec_size;
+    /* int, not uint32_t: xc_domain_hvm_getcontext() returns -1 on error */
+    int hvm_buf_size;
+    uint8_t *hvm_buf = NULL;
+    int frc = 0;
+    int retval = -1;
+
+    if ( read_exact(io_fd, &rec_size, sizeof(uint32_t)) )
+    {
+        PERROR("Could not read HVM size");
+        goto out;
+    }
+
+    if ( !rec_size )
+    {
+        ERROR("Zero HVM size");
+        goto out;
+    }
+
+    hvm_buf_size = xc_domain_hvm_getcontext(xch, dom, 0, 0);
+    if ( hvm_buf_size == -1 )
+    {
+        ERROR("Couldn't get HVM context size from Xen");
+        goto out;
+    }
+
+    /* Debug print moved after the size query so it shows the real value. */
+    if ( debug )
+        IPRINTF("HVM restore size %d %u", hvm_buf_size, rec_size);
+
+    /*
+     * The sender's context size must match ours; a mismatch means the
+     * stream came from an incompatible hypervisor.  The original code
+     * printed an error but carried on regardless.
+     */
+    if ( (uint32_t)hvm_buf_size != rec_size )
+    {
+        ERROR("HVM size for this domain is not the same as stored");
+        goto out;
+    }
+
+    hvm_buf = malloc(hvm_buf_size);
+    if ( !hvm_buf )
+    {
+        ERROR("Couldn't allocate memory");
+        goto out;
+    }
+
+    if ( read_exact(io_fd, hvm_buf, hvm_buf_size) )
+    {
+        PERROR("Could not read HVM context");
+        goto out;
+    }
+
+    frc = xc_domain_hvm_setcontext(xch, dom, hvm_buf, hvm_buf_size);
+    if ( frc )
+    {
+        ERROR("error setting the HVM context");
+        goto out;
+    }
+    retval = 0;
+
+out:
+    free(hvm_buf); /* free(NULL) is a no-op; no guard needed */
+    return retval;
+}
+
+
+/* ================= Console & Xenstore & Memory map =========== */
+
+/*
+ * Gather the guest parameters (memory map, console/store pfns, save
+ * flags), write them as the first record of the stream and return the
+ * heap-allocated struct (caller frees), or NULL on error.
+ */
+static guest_params_t * save_guest_params(xc_interface *xch, int io_fd,
+                                          uint32_t dom, uint32_t flags)
+{
+    guest_params_t *p;
+    size_t sz = sizeof(guest_params_t);
+
+    /*
+     * calloc, not malloc: the whole struct -- including padding -- is
+     * written to the wire, so it must not contain uninitialised bytes.
+     */
+    p = calloc(1, sz);
+    if ( p == NULL )
+    {
+       ERROR("Couldn't allocate memory");
+       return NULL;
+    }
+
+    if ( xc_domain_get_memory_map(xch, dom, &p->memmap) )
+    {
+        ERROR("Can't get memory map");
+        free(p);
+        return NULL;
+    }
+    /* Was never assigned before, leaking stack/heap garbage to the wire. */
+    p->mem_map_nr_entries = p->memmap.nr_banks;
+
+    if ( flags & XCFLAGS_DEBUG )
+    {
+        /* %zu: sz is a size_t; %d was undefined behaviour */
+        IPRINTF("Guest param save size: %zu ", sz);
+        IPRINTF("Guest memory map save %d entries", p->memmap.nr_banks);
+    }
+
+    if ( xc_get_hvm_param(xch, dom, HVM_PARAM_CONSOLE_PFN, &p->console_pfn) )
+    {
+        ERROR("Can't get console gpfn");
+        free(p);
+        return NULL;
+    }
+
+    if ( xc_get_hvm_param(xch, dom, HVM_PARAM_STORE_PFN, &p->store_pfn) )
+    {
+        ERROR("Can't get store gpfn");
+        free(p);
+        return NULL;
+    }
+
+    p->flags = flags;
+
+    if ( write_exact_handled(xch, io_fd, p, sz) )
+    {
+        free(p);
+        return NULL;
+    }
+    return p;
+}
+
+/*
+ * Read the guest parameter record written by save_guest_params() and
+ * apply the memory map to the new domain.  Returns the heap-allocated
+ * struct (caller frees), or NULL on error.
+ */
+static guest_params_t * restore_guest_params(xc_interface *xch, int io_fd,
+                                             uint32_t dom)
+{
+    guest_params_t *p = NULL;
+    size_t sz = sizeof(guest_params_t);
+
+    p = malloc(sz);
+    if ( p == NULL )
+    {
+        ERROR("Couldn't allocate memory");
+        return NULL;
+    }
+
+    if ( read_exact(io_fd, p, sz) )
+    {
+        PERROR("Can't read guest params");
+        free(p);
+        return NULL;
+    }
+
+    if ( p->flags & XCFLAGS_DEBUG )
+    {
+        /* %zu: sz is a size_t; %d was undefined behaviour */
+        IPRINTF("Guest param restore size: %zu ", sz);
+        IPRINTF("Guest memory map restore %d entries", p->memmap.nr_banks);
+    }
+
+    if ( xc_domain_set_memory_map(xch, dom, &p->memmap) )
+    {
+        free(p);
+        ERROR("Can't set memory map");
+        return NULL;
+    }
+    return p;
+}
+
+/*
+ * Wire up the restored domain's console and xenstore rings: clear both
+ * pages, seed their grant-table entries, then publish the pfns and
+ * event channels via HVM params.  The grant seeding must happen before
+ * the params are set so the guest finds consistent state on resume.
+ * Returns 0 on success, or the first failing xc_* call's return value.
+ */
+static int set_guest_params(xc_interface *xch, int io_fd, uint32_t dom,
+                            guest_params_t *params, unsigned int console_evtchn,
+                            domid_t console_domid, unsigned int store_evtchn,
+                            domid_t store_domid)
+{
+    int rc = 0;
+
+    /* Ring pages carry stale contents from the sender; zero them. */
+    if ( (rc = xc_clear_domain_page(xch, dom, params->console_pfn)) )
+    {
+        ERROR("Can't clear console page");
+        return rc;
+    }
+
+    if ( (rc = xc_clear_domain_page(xch, dom, params->store_pfn)) )
+    {
+        ERROR("Can't clear xenstore page");
+        return rc;
+    }
+
+    if ( (rc = xc_dom_gnttab_hvm_seed(xch, dom, params->console_pfn,
+                                      params->store_pfn, console_domid,
+                                      store_domid)) )
+    {
+        ERROR("Can't grant console and xenstore pages");
+        return rc;
+    }
+
+    if ( (rc = xc_set_hvm_param(xch, dom, HVM_PARAM_CONSOLE_PFN,
+                                params->console_pfn)) )
+    {
+        ERROR("Can't set console gpfn");
+        return rc;
+    }
+
+    if ( (rc = xc_set_hvm_param(xch, dom, HVM_PARAM_STORE_PFN,
+                                params->store_pfn)) )
+    {
+        ERROR("Can't set xenstore gpfn");
+        return rc;
+    }
+
+    if ( (rc = xc_set_hvm_param(xch, dom, HVM_PARAM_CONSOLE_EVTCHN,
+                                console_evtchn)) )
+    {
+        ERROR("Can't set console event channel");
+        return rc;
+    }
+
+    if ( (rc = xc_set_hvm_param(xch, dom, HVM_PARAM_STORE_EVTCHN,
+                                store_evtchn)) )
+    {
+        ERROR("Can't set xenstore event channel");
+        return rc;
+    }
+    return 0;
+}
+
+/* ====================== VCPU ============== */
+/*
+ * Save the context of VCPU0 to the stream.
+ * NOTE(review): only VCPU0 is handled; SMP guests presumably need all
+ * online VCPUs saved -- confirm against the restore side.
+ */
+static int save_vcpu(xc_interface *xch, int io_fd, uint32_t dom)
+{
+    vcpu_guest_context_any_t ctxt;
+
+    /* The original ignored this result and could send stack garbage. */
+    if ( xc_vcpu_getcontext(xch, dom, 0, &ctxt) )
+    {
+        PERROR("Could not get VCPU context");
+        return -1;
+    }
+    return write_exact_handled(xch, io_fd, &ctxt, sizeof(ctxt));
+}
+
+/*
+ * Read the VCPU0 context from the stream and install it via
+ * XEN_DOMCTL_setvcpucontext.  Returns 0 on success, nonzero on error.
+ */
+static int restore_vcpu(xc_interface *xch, int io_fd, uint32_t dom)
+{
+    int rc = -1;
+    DECLARE_DOMCTL;
+    DECLARE_HYPERCALL_BUFFER(vcpu_guest_context_any_t, ctxt);
+
+    ctxt = xc_hypercall_buffer_alloc(xch, ctxt, sizeof(*ctxt));
+    if ( ctxt == NULL )
+    {
+        /* The original memset a NULL pointer here on allocation failure. */
+        ERROR("Couldn't allocate hypercall buffer for VCPU context");
+        return -1;
+    }
+    memset(ctxt, 0, sizeof(*ctxt));
+
+    if ( read_exact(io_fd, ctxt, sizeof(*ctxt)) )
+    {
+        PERROR("VCPU context read failed");
+        goto out;
+    }
+
+    memset(&domctl, 0, sizeof(domctl));
+    domctl.cmd = XEN_DOMCTL_setvcpucontext;
+    domctl.domain = dom;
+    domctl.u.vcpucontext.vcpu = 0;
+    set_xen_guest_handle(domctl.u.vcpucontext.ctxt, ctxt);
+    rc = do_domctl(xch, &domctl);
+    if ( rc )
+        ERROR("VCPU context set failed (error %d)", rc);
+
+out:
+    xc_hypercall_buffer_free(xch, ctxt);
+    return rc;
+}
+
+/* ================== Main ============== */
+/*
+ * Entry point for ARM domain save: guest params, then memory (with
+ * optional live iteration), then HVM context, then VCPU registers.
+ * Returns 0 on success, -1 on error.
+ */
+int xc_domain_save(xc_interface *xch, int io_fd, uint32_t dom,
+                   uint32_t max_iters, uint32_t max_factor, uint32_t flags,
+                   struct save_callbacks *callbacks, int hvm,
+                   unsigned long vm_generationid_addr)
+{
+    int debug = !!(flags & XCFLAGS_DEBUG);
+    int rc = -1;
+    guest_params_t *params = save_guest_params(xch, io_fd, dom, flags);
+
+    if ( params == NULL )
+    {
+        ERROR("Can't save guest params");
+        return -1;
+    }
+
+    if ( save_memory(xch, io_fd, dom, callbacks, max_iters,
+                     max_factor, params) )
+    {
+        ERROR("Memory not saved");
+        goto out;
+    }
+
+    if ( save_armhvm(xch, io_fd, dom, debug) )
+    {
+        ERROR("HVM not saved");
+        goto out;
+    }
+
+    if ( save_vcpu(xch, io_fd, dom) )
+    {
+        ERROR("VCPU not saved");
+        goto out;
+    }
+
+    rc = 0;
+out:
+    /* Single cleanup point for every path that owns params. */
+    free(params);
+    return rc;
+}
+
+/*
+ * Entry point for ARM domain restore; consumes the stream produced by
+ * xc_domain_save() in the same order: guest params, memory, HVM
+ * context, VCPU registers.  Returns 0 on success, -1 on error.
+ */
+int xc_domain_restore(xc_interface *xch, int io_fd, uint32_t dom,
+                      unsigned int store_evtchn, unsigned long *store_gpfn,
+                      domid_t store_domid, unsigned int console_evtchn,
+                      unsigned long *console_gpfn, domid_t console_domid,
+                      unsigned int hvm, unsigned int pae, int superpages,
+                      int no_incr_generationid,
+                      unsigned long *vm_generationid_addr,
+                      struct restore_callbacks *callbacks)
+{
+    int rc = -1;
+    int debug;
+    guest_params_t *params = restore_guest_params(xch, io_fd, dom);
+
+    if ( params == NULL )
+    {
+        ERROR("Can't restore guest params");
+        return -1;
+    }
+    /* Debug flag travels with the stream, not with this side's flags. */
+    debug = !!( params->flags & XCFLAGS_DEBUG );
+
+    if ( restore_memory(xch, io_fd, dom, params) )
+    {
+        ERROR("Can't restore memory");
+        goto out;
+    }
+
+    if ( set_guest_params(xch, io_fd, dom, params,
+                console_evtchn, console_domid,
+                store_evtchn, store_domid) )
+    {
+        ERROR("Can't setup guest params");
+        goto out;
+    }
+
+    /* Setup console and store PFNs to caller */
+    *console_gpfn = params->console_pfn;
+    *store_gpfn = params->store_pfn;
+
+    if ( restore_armhvm(xch, io_fd, dom, debug) )
+    {
+        ERROR("HVM not restored");
+        goto out;
+    }
+
+    if ( restore_vcpu(xch, io_fd, dom) )
+    {
+        ERROR("Can't restore VCPU");
+        goto out;
+    }
+
+    rc = 0;
+out:
+    /* Single cleanup point for every path that owns params. */
+    free(params);
+    return rc;
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/tools/misc/Makefile b/tools/misc/Makefile
index 520ef80..5338f87 100644
--- a/tools/misc/Makefile
+++ b/tools/misc/Makefile
@@ -11,7 +11,9 @@ HDRS     = $(wildcard *.h)
 
 TARGETS-y := xenperf xenpm xen-tmem-list-parse gtraceview gtracestat 
xenlockprof xenwatchdogd xencov
 TARGETS-$(CONFIG_X86) += xen-detect xen-hvmctx xen-hvmcrash xen-lowmemd
+ifeq ($(CONFIG_X86),y)
 TARGETS-$(CONFIG_MIGRATE) += xen-hptool
+endif
 TARGETS := $(TARGETS-y)
 
 SUBDIRS-$(CONFIG_LOMOUNT) += lomount
@@ -25,7 +27,9 @@ INSTALL_BIN := $(INSTALL_BIN-y)
 INSTALL_SBIN-y := xm xen-bugtool xen-python-path xend xenperf xsview xenpm 
xen-tmem-list-parse gtraceview \
        gtracestat xenlockprof xenwatchdogd xen-ringwatch xencov
 INSTALL_SBIN-$(CONFIG_X86) += xen-hvmctx xen-hvmcrash xen-lowmemd
+ifeq ($(CONFIG_X86),y)
 INSTALL_SBIN-$(CONFIG_MIGRATE) += xen-hptool
+endif
 INSTALL_SBIN := $(INSTALL_SBIN-y)
 
 INSTALL_PRIVBIN-y := xenpvnetboot
-- 
1.8.1.2


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.