
Re: [Xen-devel] PATCH 1/10: Add a QEMU machine type for fullvirt guests



This patch re-arranges the code for starting up a fully virtualized guest; there
is no functional change. In particular it creates a new QEMU machine type for
Xen fully virtualized guests, which can be selected with '-M xenfv'. For
compatibility this machine type is also made the default. The code for setting
up the guest memory maps is moved out of vl.c and into hw/xen_machine_fv.c,
so that it can easily be skipped when the paravirt machine type is added in the
next patch.
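
As background, '-M xenfv' reaches xen_init_fv() through QEMU's machine
registry: each QEMUMachine carries a name, a description and an init callback,
register_machines() adds it to a global list, and the machine named on the
command line is looked up by name and its init function invoked. With
CONFIG_DM only xenfv_machine is registered, which is also how it becomes the
default. The standalone sketch below illustrates that pattern; the struct
layout and helper names here are simplified assumptions for illustration only,
the real definitions live in vl.h/vl.c.

    /* Illustrative only -- not part of the patch. */
    #include <stdio.h>
    #include <string.h>

    typedef void MachineInitFunc(void);  /* real callback takes ram size, display state, ... */

    typedef struct Machine {
        const char *name;
        const char *desc;
        MachineInitFunc *init;
        struct Machine *next;
    } Machine;

    static Machine *machine_list;

    static void register_machine(Machine *m)
    {
        /* prepend to the global list of known machine types */
        m->next = machine_list;
        machine_list = m;
    }

    static Machine *find_machine(const char *name)
    {
        Machine *m;
        for (m = machine_list; m; m = m->next)
            if (!strcmp(m->name, name))
                return m;
        return NULL;
    }

    static void xenfv_init_demo(void) { printf("initialising xenfv guest\n"); }

    static Machine xenfv_demo = { "xenfv", "Xen Fully-virtualized PC", xenfv_init_demo };

    int main(void)
    {
        Machine *m;
        register_machine(&xenfv_demo);
        m = find_machine("xenfv");   /* what '-M xenfv' boils down to */
        if (m)
            m->init();
        return 0;
    }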

 b/tools/ioemu/hw/xen_machine_fv.c |  288 ++++++++++++++++++++++++++++++++++++++
 tools/ioemu/Makefile.target       |    1 
 tools/ioemu/vl.c                  |  244 --------------------------------
 tools/ioemu/vl.h                  |    3 
 4 files changed, 297 insertions(+), 239 deletions(-)

   Signed-off-by: Daniel P. Berrange <berrange@xxxxxxxxxx>


diff -r b28ae5f00553 tools/ioemu/Makefile.target
--- a/tools/ioemu/Makefile.target       Tue Oct 23 09:26:43 2007 +0100
+++ b/tools/ioemu/Makefile.target       Wed Oct 24 15:20:39 2007 -0400
@@ -409,6 +409,7 @@ VL_OBJS+= piix4acpi.o
 VL_OBJS+= piix4acpi.o
 VL_OBJS+= xenstore.o
 VL_OBJS+= xen_platform.o
+VL_OBJS+= xen_machine_fv.o
 VL_OBJS+= tpm_tis.o
 CPPFLAGS += -DHAS_AUDIO
 endif
diff -r b28ae5f00553 tools/ioemu/hw/xen_machine_fv.c
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/tools/ioemu/hw/xen_machine_fv.c   Wed Oct 24 15:24:39 2007 -0400
@@ -0,0 +1,288 @@
+/*
+ * QEMU Xen FV Machine
+ *
+ * Copyright (c) 2003-2007 Fabrice Bellard
+ * Copyright (c) 2007 Red Hat
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "vl.h"
+#include <xen/hvm/params.h>
+#include <sys/mman.h>
+
+#if defined(MAPCACHE)
+
+#if defined(__i386__) 
+#define MAX_MCACHE_SIZE    0x40000000 /* 1GB max for x86 */
+#define MCACHE_BUCKET_SHIFT 16
+#elif defined(__x86_64__)
+#define MAX_MCACHE_SIZE    0x1000000000 /* 64GB max for x86_64 */
+#define MCACHE_BUCKET_SHIFT 20
+#endif
+
+#define MCACHE_BUCKET_SIZE (1UL << MCACHE_BUCKET_SHIFT)
+
+#define BITS_PER_LONG (sizeof(long)*8)
+#define BITS_TO_LONGS(bits) \
+    (((bits)+BITS_PER_LONG-1)/BITS_PER_LONG)
+#define DECLARE_BITMAP(name,bits) \
+    unsigned long name[BITS_TO_LONGS(bits)]
+#define test_bit(bit,map) \
+    (!!((map)[(bit)/BITS_PER_LONG] & (1UL << ((bit)%BITS_PER_LONG))))
+
+struct map_cache {
+    unsigned long paddr_index;
+    uint8_t      *vaddr_base;
+    DECLARE_BITMAP(valid_mapping, MCACHE_BUCKET_SIZE>>PAGE_SHIFT);
+};
+
+static struct map_cache *mapcache_entry;
+static unsigned long nr_buckets;
+
+/* For most cases (>99.9%), the page address is the same. */
+static unsigned long last_address_index = ~0UL;
+static uint8_t      *last_address_vaddr;
+
+static int qemu_map_cache_init(void)
+{
+    unsigned long size;
+
+    nr_buckets = (((MAX_MCACHE_SIZE >> PAGE_SHIFT) +
+                   (1UL << (MCACHE_BUCKET_SHIFT - PAGE_SHIFT)) - 1) >>
+                  (MCACHE_BUCKET_SHIFT - PAGE_SHIFT));
+
+    /*
+     * Use mmap() directly: lets us allocate a big hash table with no up-front
+     * cost in storage space. The OS will allocate memory only for the buckets
+     * that we actually use. All others will contain all zeroes.
+     */
+    size = nr_buckets * sizeof(struct map_cache);
+    size = (size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
+    fprintf(logfile, "qemu_map_cache_init nr_buckets = %lx size %lu\n", 
nr_buckets, size);
+    mapcache_entry = mmap(NULL, size, PROT_READ|PROT_WRITE,
+                          MAP_SHARED|MAP_ANON, -1, 0);
+    if (mapcache_entry == MAP_FAILED) {
+        errno = ENOMEM;
+        return -1;
+    }
+
+    return 0;
+}
+
+static void qemu_remap_bucket(struct map_cache *entry,
+                              unsigned long address_index)
+{
+    uint8_t *vaddr_base;
+    unsigned long pfns[MCACHE_BUCKET_SIZE >> PAGE_SHIFT];
+    unsigned int i, j;
+
+    if (entry->vaddr_base != NULL) {
+        errno = munmap(entry->vaddr_base, MCACHE_BUCKET_SIZE);
+        if (errno) {
+            fprintf(logfile, "unmap fails %d\n", errno);
+            exit(-1);
+        }
+    }
+
+    for (i = 0; i < MCACHE_BUCKET_SIZE >> PAGE_SHIFT; i++)
+        pfns[i] = (address_index << (MCACHE_BUCKET_SHIFT-PAGE_SHIFT)) + i;
+
+    vaddr_base = xc_map_foreign_batch(xc_handle, domid, PROT_READ|PROT_WRITE,
+                                      pfns, MCACHE_BUCKET_SIZE >> PAGE_SHIFT);
+    if (vaddr_base == NULL) {
+        fprintf(logfile, "xc_map_foreign_batch error %d\n", errno);
+        exit(-1);
+    }
+
+    entry->vaddr_base  = vaddr_base;
+    entry->paddr_index = address_index;
+
+    for (i = 0; i < MCACHE_BUCKET_SIZE >> PAGE_SHIFT; i += BITS_PER_LONG) {
+        unsigned long word = 0;
+        j = ((i + BITS_PER_LONG) > (MCACHE_BUCKET_SIZE >> PAGE_SHIFT)) ?
+            (MCACHE_BUCKET_SIZE >> PAGE_SHIFT) % BITS_PER_LONG : BITS_PER_LONG;
+        while (j > 0)
+            word = (word << 1) | (((pfns[i + --j] >> 28) & 0xf) != 0xf);
+        entry->valid_mapping[i / BITS_PER_LONG] = word;
+    }
+}
+
+uint8_t *qemu_map_cache(target_phys_addr_t phys_addr)
+{
+    struct map_cache *entry;
+    unsigned long address_index  = phys_addr >> MCACHE_BUCKET_SHIFT;
+    unsigned long address_offset = phys_addr & (MCACHE_BUCKET_SIZE-1);
+
+    if (address_index == last_address_index)
+        return last_address_vaddr + address_offset;
+
+    entry = &mapcache_entry[address_index % nr_buckets];
+
+    if (entry->vaddr_base == NULL || entry->paddr_index != address_index ||
+        !test_bit(address_offset>>PAGE_SHIFT, entry->valid_mapping))
+        qemu_remap_bucket(entry, address_index);
+
+    if (!test_bit(address_offset>>PAGE_SHIFT, entry->valid_mapping))
+        return NULL;
+
+    last_address_index = address_index;
+    last_address_vaddr = entry->vaddr_base;
+
+    return last_address_vaddr + address_offset;
+}
+
+void qemu_invalidate_map_cache(void)
+{
+    unsigned long i;
+
+    mapcache_lock();
+
+    for (i = 0; i < nr_buckets; i++) {
+        struct map_cache *entry = &mapcache_entry[i];
+
+        if (entry->vaddr_base == NULL)
+            continue;
+
+        errno = munmap(entry->vaddr_base, MCACHE_BUCKET_SIZE);
+        if (errno) {
+            fprintf(logfile, "unmap fails %d\n", errno);
+            exit(-1);
+        }
+
+        entry->paddr_index = 0;
+        entry->vaddr_base  = NULL;
+    }
+
+    last_address_index =  ~0UL;
+    last_address_vaddr = NULL;
+
+    mapcache_unlock();
+}
+
+#endif /* defined(MAPCACHE) */
+
+
+static void xen_init_fv(uint64_t ram_size, int vga_ram_size, char *boot_device,
+                        DisplayState *ds, const char **fd_filename,
+                        int snapshot,
+                        const char *kernel_filename,
+                        const char *kernel_cmdline,
+                        const char *initrd_filename,
+                        const char *direct_pci)
+{
+    unsigned long ioreq_pfn;
+    extern void *shared_page;
+    extern void *buffered_io_page;
+#ifdef __ia64__
+    unsigned long i, nr_pages;
+    xen_pfn_t *page_array;
+    extern void *buffered_pio_page;
+#endif
+
+#if defined(__i386__) || defined(__x86_64__)
+
+    if (qemu_map_cache_init()) {
+        fprintf(logfile, "qemu_map_cache_init returned: error %d\n", errno);
+        exit(-1);
+    }
+
+    xc_get_hvm_param(xc_handle, domid, HVM_PARAM_IOREQ_PFN, &ioreq_pfn);
+    fprintf(logfile, "shared page at pfn %lx\n", ioreq_pfn);
+    shared_page = xc_map_foreign_range(xc_handle, domid, PAGE_SIZE,
+                                       PROT_READ|PROT_WRITE, ioreq_pfn);
+    if (shared_page == NULL) {
+        fprintf(logfile, "map shared IO page returned error %d\n", errno);
+        exit(-1);
+    }
+
+    xc_get_hvm_param(xc_handle, domid, HVM_PARAM_BUFIOREQ_PFN, &ioreq_pfn);
+    fprintf(logfile, "buffered io page at pfn %lx\n", ioreq_pfn);
+    buffered_io_page = xc_map_foreign_range(xc_handle, domid, PAGE_SIZE,
+                                            PROT_READ|PROT_WRITE, ioreq_pfn);
+    if (buffered_io_page == NULL) {
+        fprintf(logfile, "map buffered IO page returned error %d\n", errno);
+        exit(-1);
+    }
+
+#elif defined(__ia64__)
+
+    nr_pages = ram_size/PAGE_SIZE;
+
+    page_array = (xen_pfn_t *)malloc(nr_pages * sizeof(xen_pfn_t));
+    if (page_array == NULL) {
+        fprintf(logfile, "malloc returned error %d\n", errno);
+        exit(-1);
+    }
+
+    shared_page = xc_map_foreign_range(xc_handle, domid, PAGE_SIZE,
+                                       PROT_READ|PROT_WRITE,
+                                       IO_PAGE_START >> PAGE_SHIFT);
+
+    buffered_io_page =xc_map_foreign_range(xc_handle, domid, PAGE_SIZE,
+                                       PROT_READ|PROT_WRITE,
+                                       BUFFER_IO_PAGE_START >> PAGE_SHIFT);
+
+    buffered_pio_page = xc_map_foreign_range(xc_handle, domid, PAGE_SIZE,
+                                       PROT_READ|PROT_WRITE,
+                                       BUFFER_PIO_PAGE_START >> PAGE_SHIFT);
+
+    for (i = 0; i < nr_pages; i++)
+        page_array[i] = i;
+       
+    /* VTI will not use memory between 3G~4G, so we just pass a legal pfn
+       to make QEMU map continuous virtual memory space */
+    if (ram_size > MMIO_START) {       
+        for (i = 0 ; i < (MEM_G >> PAGE_SHIFT); i++)
+            page_array[(MMIO_START >> PAGE_SHIFT) + i] =
+                (STORE_PAGE_START >> PAGE_SHIFT); 
+    }
+
+    phys_ram_base = xc_map_foreign_batch(xc_handle, domid,
+                                         PROT_READ|PROT_WRITE,
+                                         page_array, nr_pages);
+    if (phys_ram_base == 0) {
+        fprintf(logfile, "xc_map_foreign_batch returned error %d\n", errno);
+        exit(-1);
+    }
+    free(page_array);
+#endif
+
+    timeoffset_get();
+
+
+    pc_machine.init(ram_size, vga_ram_size, boot_device, ds, fd_filename,
+                    snapshot, kernel_filename, kernel_cmdline, initrd_filename,
+                    direct_pci);
+}
+
+QEMUMachine xenfv_machine = {
+    "xenfv",
+    "Xen Fully-virtualized PC",
+    xen_init_fv,
+};
+
+/*
+ * Local variables:
+ *  indent-tabs-mode: nil
+ *  c-indent-level: 4
+ *  c-basic-offset: 4
+ *  tab-width: 4
+ * End:
+ */
diff -r b28ae5f00553 tools/ioemu/vl.c
--- a/tools/ioemu/vl.c  Tue Oct 23 09:26:43 2007 +0100
+++ b/tools/ioemu/vl.c  Wed Oct 24 15:21:37 2007 -0400
@@ -96,7 +96,6 @@
 
 #include "exec-all.h"
 
-#include <xen/hvm/params.h>
 #define DEFAULT_NETWORK_SCRIPT "/etc/xen/qemu-ifup"
 #ifdef _BSD
 #define DEFAULT_BRIDGE "bridge0"
@@ -6696,8 +6695,12 @@ void register_machines(void)
 void register_machines(void)
 {
 #if defined(TARGET_I386)
+#ifndef CONFIG_DM
     qemu_register_machine(&pc_machine);
     qemu_register_machine(&isapc_machine);
+#else
+    qemu_register_machine(&xenfv_machine);
+#endif
 #elif defined(TARGET_PPC)
     qemu_register_machine(&heathrow_machine);
     qemu_register_machine(&core99_machine);
@@ -6905,156 +6908,6 @@ int set_mm_mapping(int xc_handle, uint32
     return 0;
 }
 
-#if defined(MAPCACHE)
-
-#if defined(__i386__) 
-#define MAX_MCACHE_SIZE    0x40000000 /* 1GB max for x86 */
-#define MCACHE_BUCKET_SHIFT 16
-#elif defined(__x86_64__)
-#define MAX_MCACHE_SIZE    0x1000000000 /* 64GB max for x86_64 */
-#define MCACHE_BUCKET_SHIFT 20
-#endif
-
-#define MCACHE_BUCKET_SIZE (1UL << MCACHE_BUCKET_SHIFT)
-
-#define BITS_PER_LONG (sizeof(long)*8)
-#define BITS_TO_LONGS(bits) \
-    (((bits)+BITS_PER_LONG-1)/BITS_PER_LONG)
-#define DECLARE_BITMAP(name,bits) \
-    unsigned long name[BITS_TO_LONGS(bits)]
-#define test_bit(bit,map) \
-    (!!((map)[(bit)/BITS_PER_LONG] & (1UL << ((bit)%BITS_PER_LONG))))
-
-struct map_cache {
-    unsigned long paddr_index;
-    uint8_t      *vaddr_base;
-    DECLARE_BITMAP(valid_mapping, MCACHE_BUCKET_SIZE>>PAGE_SHIFT);
-};
-
-static struct map_cache *mapcache_entry;
-static unsigned long nr_buckets;
-
-/* For most cases (>99.9%), the page address is the same. */
-static unsigned long last_address_index = ~0UL;
-static uint8_t      *last_address_vaddr;
-
-static int qemu_map_cache_init(void)
-{
-    unsigned long size;
-
-    nr_buckets = (((MAX_MCACHE_SIZE >> PAGE_SHIFT) +
-                   (1UL << (MCACHE_BUCKET_SHIFT - PAGE_SHIFT)) - 1) >>
-                  (MCACHE_BUCKET_SHIFT - PAGE_SHIFT));
-
-    /*
-     * Use mmap() directly: lets us allocate a big hash table with no up-front
-     * cost in storage space. The OS will allocate memory only for the buckets
-     * that we actually use. All others will contain all zeroes.
-     */
-    size = nr_buckets * sizeof(struct map_cache);
-    size = (size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
-    fprintf(logfile, "qemu_map_cache_init nr_buckets = %lx size %lu\n", 
nr_buckets, size);
-    mapcache_entry = mmap(NULL, size, PROT_READ|PROT_WRITE,
-                          MAP_SHARED|MAP_ANON, -1, 0);
-    if (mapcache_entry == MAP_FAILED) {
-        errno = ENOMEM;
-        return -1;
-    }
-
-    return 0;
-}
-
-static void qemu_remap_bucket(struct map_cache *entry,
-                              unsigned long address_index)
-{
-    uint8_t *vaddr_base;
-    unsigned long pfns[MCACHE_BUCKET_SIZE >> PAGE_SHIFT];
-    unsigned int i, j;
-
-    if (entry->vaddr_base != NULL) {
-        errno = munmap(entry->vaddr_base, MCACHE_BUCKET_SIZE);
-        if (errno) {
-            fprintf(logfile, "unmap fails %d\n", errno);
-            exit(-1);
-        }
-    }
-
-    for (i = 0; i < MCACHE_BUCKET_SIZE >> PAGE_SHIFT; i++)
-        pfns[i] = (address_index << (MCACHE_BUCKET_SHIFT-PAGE_SHIFT)) + i;
-
-    vaddr_base = xc_map_foreign_batch(xc_handle, domid, PROT_READ|PROT_WRITE,
-                                      pfns, MCACHE_BUCKET_SIZE >> PAGE_SHIFT);
-    if (vaddr_base == NULL) {
-        fprintf(logfile, "xc_map_foreign_batch error %d\n", errno);
-        exit(-1);
-    }
-
-    entry->vaddr_base  = vaddr_base;
-    entry->paddr_index = address_index;
-
-    for (i = 0; i < MCACHE_BUCKET_SIZE >> PAGE_SHIFT; i += BITS_PER_LONG) {
-        unsigned long word = 0;
-        j = ((i + BITS_PER_LONG) > (MCACHE_BUCKET_SIZE >> PAGE_SHIFT)) ?
-            (MCACHE_BUCKET_SIZE >> PAGE_SHIFT) % BITS_PER_LONG : BITS_PER_LONG;
-        while (j > 0)
-            word = (word << 1) | (((pfns[i + --j] >> 28) & 0xf) != 0xf);
-        entry->valid_mapping[i / BITS_PER_LONG] = word;
-    }
-}
-
-uint8_t *qemu_map_cache(target_phys_addr_t phys_addr)
-{
-    struct map_cache *entry;
-    unsigned long address_index  = phys_addr >> MCACHE_BUCKET_SHIFT;
-    unsigned long address_offset = phys_addr & (MCACHE_BUCKET_SIZE-1);
-
-    if (address_index == last_address_index)
-        return last_address_vaddr + address_offset;
-
-    entry = &mapcache_entry[address_index % nr_buckets];
-
-    if (entry->vaddr_base == NULL || entry->paddr_index != address_index ||
-        !test_bit(address_offset>>PAGE_SHIFT, entry->valid_mapping))
-        qemu_remap_bucket(entry, address_index);
-
-    if (!test_bit(address_offset>>PAGE_SHIFT, entry->valid_mapping))
-        return NULL;
-
-    last_address_index = address_index;
-    last_address_vaddr = entry->vaddr_base;
-
-    return last_address_vaddr + address_offset;
-}
-
-void qemu_invalidate_map_cache(void)
-{
-    unsigned long i;
-
-    mapcache_lock();
-
-    for (i = 0; i < nr_buckets; i++) {
-        struct map_cache *entry = &mapcache_entry[i];
-
-        if (entry->vaddr_base == NULL)
-            continue;
-
-        errno = munmap(entry->vaddr_base, MCACHE_BUCKET_SIZE);
-        if (errno) {
-            fprintf(logfile, "unmap fails %d\n", errno);
-            exit(-1);
-        }
-
-        entry->paddr_index = 0;
-        entry->vaddr_base  = NULL;
-    }
-
-    last_address_index =  ~0UL;
-    last_address_vaddr = NULL;
-
-    mapcache_unlock();
-}
-
-#endif /* defined(MAPCACHE) */
 
 int main(int argc, char **argv)
 {
@@ -7089,15 +6942,7 @@ int main(int argc, char **argv)
     char usb_devices[MAX_USB_CMDLINE][128];
     int usb_devices_index;
     int fds[2];
-    unsigned long ioreq_pfn;
-    extern void *shared_page;
-    extern void *buffered_io_page;
     struct rlimit rl;
-#ifdef __ia64__
-    unsigned long nr_pages;
-    xen_pfn_t *page_array;
-    extern void *buffered_pio_page;
-#endif
     sigset_t set;
     char qemu_dm_logfilename[128];
     const char *direct_pci = NULL;
@@ -7681,6 +7526,7 @@ int main(int argc, char **argv)
 
 #ifdef CONFIG_DM
     bdrv_init();
+    xc_handle = xc_interface_open();
     xenstore_parse_domain_config(domid);
 #endif /* CONFIG_DM */
 
@@ -7774,83 +7620,6 @@ int main(int argc, char **argv)
        }
        phys_ram_size += ret;
     }
-#endif /* !CONFIG_DM */
-
-#ifdef CONFIG_DM
-
-    xc_handle = xc_interface_open();
-
-#if defined(__i386__) || defined(__x86_64__)
-
-    if (qemu_map_cache_init()) {
-        fprintf(logfile, "qemu_map_cache_init returned: error %d\n", errno);
-        exit(-1);
-    }
-
-    xc_get_hvm_param(xc_handle, domid, HVM_PARAM_IOREQ_PFN, &ioreq_pfn);
-    fprintf(logfile, "shared page at pfn %lx\n", ioreq_pfn);
-    shared_page = xc_map_foreign_range(xc_handle, domid, PAGE_SIZE,
-                                       PROT_READ|PROT_WRITE, ioreq_pfn);
-    if (shared_page == NULL) {
-        fprintf(logfile, "map shared IO page returned error %d\n", errno);
-        exit(-1);
-    }
-
-    xc_get_hvm_param(xc_handle, domid, HVM_PARAM_BUFIOREQ_PFN, &ioreq_pfn);
-    fprintf(logfile, "buffered io page at pfn %lx\n", ioreq_pfn);
-    buffered_io_page = xc_map_foreign_range(xc_handle, domid, PAGE_SIZE,
-                                            PROT_READ|PROT_WRITE, ioreq_pfn);
-    if (buffered_io_page == NULL) {
-        fprintf(logfile, "map buffered IO page returned error %d\n", errno);
-        exit(-1);
-    }
-
-#elif defined(__ia64__)
-
-    nr_pages = ram_size/PAGE_SIZE;
-
-    page_array = (xen_pfn_t *)malloc(nr_pages * sizeof(xen_pfn_t));
-    if (page_array == NULL) {
-        fprintf(logfile, "malloc returned error %d\n", errno);
-        exit(-1);
-    }
-
-    shared_page = xc_map_foreign_range(xc_handle, domid, PAGE_SIZE,
-                                       PROT_READ|PROT_WRITE,
-                                       IO_PAGE_START >> PAGE_SHIFT);
-
-    buffered_io_page =xc_map_foreign_range(xc_handle, domid, PAGE_SIZE,
-                                       PROT_READ|PROT_WRITE,
-                                       BUFFER_IO_PAGE_START >> PAGE_SHIFT);
-
-    buffered_pio_page = xc_map_foreign_range(xc_handle, domid, PAGE_SIZE,
-                                       PROT_READ|PROT_WRITE,
-                                       BUFFER_PIO_PAGE_START >> PAGE_SHIFT);
-
-    for (i = 0; i < nr_pages; i++)
-        page_array[i] = i;
-       
-    /* VTI will not use memory between 3G~4G, so we just pass a legal pfn
-       to make QEMU map continuous virtual memory space */
-    if (ram_size > MMIO_START) {       
-        for (i = 0 ; i < (MEM_G >> PAGE_SHIFT); i++)
-            page_array[(MMIO_START >> PAGE_SHIFT) + i] =
-                (STORE_PAGE_START >> PAGE_SHIFT); 
-    }
-
-    phys_ram_base = xc_map_foreign_batch(xc_handle, domid,
-                                         PROT_READ|PROT_WRITE,
-                                         page_array, nr_pages);
-    if (phys_ram_base == 0) {
-        fprintf(logfile, "xc_map_foreign_batch returned error %d\n", errno);
-        exit(-1);
-    }
-    free(page_array);
-#endif
-
-    timeoffset_get();
-
-#else  /* !CONFIG_DM */
 
     phys_ram_base = qemu_vmalloc(phys_ram_size);
     if (!phys_ram_base) {
@@ -7858,9 +7627,6 @@ int main(int argc, char **argv)
         exit(1);
     }
 
-#endif /* !CONFIG_DM */
-
-#ifndef CONFIG_DM
     /* we always create the cdrom drive, even if no disk is there */
     bdrv_init();
     if (cdrom_index >= 0) {
diff -r b28ae5f00553 tools/ioemu/vl.h
--- a/tools/ioemu/vl.h  Tue Oct 23 09:26:43 2007 +0100
+++ b/tools/ioemu/vl.h  Wed Oct 24 15:20:39 2007 -0400
@@ -1108,6 +1108,9 @@ extern void pci_piix4_acpi_init(PCIBus *
 /* pc.c */
 extern QEMUMachine pc_machine;
 extern QEMUMachine isapc_machine;
+#ifdef CONFIG_DM
+extern QEMUMachine xenfv_machine;
+#endif
 extern int fd_bootchk;
 
 void ioport_set_a20(int enable);

-- 
|=- Red Hat, Engineering, Emerging Technologies, Boston.  +1 978 392 2496 -=|
|=-           Perl modules: http://search.cpan.org/~danberr/              -=|
|=-               Projects: http://freshmeat.net/~danielpb/               -=|
|=-  GnuPG: 7D3B9505   F3C9 553F A1DA 4AC2 5648 23C1 B3DF F742 7D3B 9505  -=| 
