
[Xen-devel] [PATCH] ioemu: directly project all memory on x86_64



ioemu: directly project all memory on x86_64
On x86_64 we have enough virtual address space to directly map
(project) all of the guest's physical memory.  We still need to keep
the map cache, however, for the occasional accesses outside guest RAM,
such as ACPI.
For this we need to know the amount of memory, so have the domain
builder always pass it to the device model, not only for ia64 (it is
harmless anyway).

Signed-off-by: Samuel Thibault <samuel.thibault@xxxxxxxxxxxxx>
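
A small standalone sketch (not part of the patch) may help make the
intent of the exec-dm.c change concrete: on x86_64, guest-physical
addresses below ram_size resolve through the flat projection at
phys_ram_base, while anything above ram_size (ACPI and friends) still
goes through the map cache.  The map_cache_lookup() stand-in and the
calloc() that plays the role of xc_map_foreign_batch() are illustrative
assumptions, not ioemu code.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static uint8_t *phys_ram_base;          /* base of the direct projection */
static uint64_t ram_size = 64UL << 20;  /* pretend 64MB guest for the demo */

/* Stand-in for qemu_map_cache(): just report that the cache would be used. */
static uint8_t *map_cache_lookup(uint64_t paddr)
{
    fprintf(stderr, "map cache would handle paddr 0x%" PRIx64 "\n", paddr);
    return NULL;
}

/* Mirrors the x86_64 phys_ram_addr() policy introduced below. */
static uint8_t *phys_ram_addr_sketch(uint64_t paddr)
{
    return (paddr < ram_size) ? phys_ram_base + paddr
                              : map_cache_lookup(paddr);
}

int main(void)
{
    /* In ioemu the projection comes from xc_map_foreign_batch(); an
       ordinary allocation plays that role here. */
    phys_ram_base = calloc(1, ram_size);
    if (phys_ram_base == NULL)
        return 1;

    uint8_t *p = phys_ram_addr_sketch(0x1000);   /* inside guest RAM */
    *p = 0x42;
    printf("paddr 0x1000 -> host %p, value %#x\n", (void *)p, *p);

    phys_ram_addr_sketch(ram_size + 0x1000);     /* falls back to the cache */
    free(phys_ram_base);
    return 0;
}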

diff -r c364f80eb4b5 tools/ioemu/hw/xen_machine_fv.c
--- a/tools/ioemu/hw/xen_machine_fv.c   Wed Jan 23 13:27:21 2008 +0000
+++ b/tools/ioemu/hw/xen_machine_fv.c   Wed Jan 23 15:37:19 2008 +0000
@@ -26,6 +26,52 @@
 #include "vl.h"
 #include <xen/hvm/params.h>
 #include <sys/mman.h>
+
+#if defined(__x86_64__) || defined(__ia64__)
+static void phys_ram_base_reinit(void)
+{
+    unsigned long nr_pages;
+    xen_pfn_t *page_array;
+    int i;
+
+    if (phys_ram_base)
+        munmap(phys_ram_base, ram_size);
+
+    nr_pages = ram_size / XC_PAGE_SIZE;
+
+    page_array = (xen_pfn_t *)malloc(nr_pages * sizeof(xen_pfn_t));
+    if (page_array == NULL) {
+        fprintf(logfile, "malloc returned error %d\n", errno);
+        exit(-1);
+    }
+
+    for (i = 0; i < nr_pages; i++)
+        page_array[i] = i;
+       
+#if defined(__ia64__)
+    /* VTI will not use memory between 3G~4G, so we just pass a legal pfn
+       to make QEMU map continuous virtual memory space */
+    if (ram_size > MMIO_START) {       
+        for (i = 0 ; i < (MEM_G >> XC_PAGE_SHIFT); i++)
+            page_array[(MMIO_START >> XC_PAGE_SHIFT) + i] =
+                (STORE_PAGE_START >> XC_PAGE_SHIFT); 
+    }
+#endif
+
+    phys_ram_base = xc_map_foreign_batch(xc_handle, domid,
+                                         PROT_READ|PROT_WRITE,
+                                         page_array, nr_pages);
+    if (phys_ram_base == 0) {
+        fprintf(logfile, "xc_map_foreign_batch returned error %d\n", errno);
+        exit(-1);
+    }
+    free(page_array);
+
+    fprintf(logfile, "%ldMB direct physical ram projection\n", ram_size >> 20);
+}
+#else
+#define phys_ram_base_reinit() ((void)0)
+#endif
 
 #if defined(MAPCACHE)
 
@@ -174,6 +220,8 @@ void qemu_invalidate_map_cache(void)
     last_address_vaddr = NULL;
 
     mapcache_unlock();
+
+    phys_ram_base_reinit();
 }
 
 #endif /* defined(MAPCACHE) */
@@ -191,14 +239,10 @@ static void xen_init_fv(uint64_t ram_siz
     extern void *shared_page;
     extern void *buffered_io_page;
 #ifdef __ia64__
-    unsigned long nr_pages;
-    xen_pfn_t *page_array;
     extern void *buffered_pio_page;
-    int i;
 #endif
 
 #if defined(__i386__) || defined(__x86_64__)
-
     if (qemu_map_cache_init()) {
         fprintf(logfile, "qemu_map_cache_init returned: error %d\n", errno);
         exit(-1);
@@ -232,35 +276,9 @@ static void xen_init_fv(uint64_t ram_siz
         fprintf(logfile, "map buffered PIO page returned error %d\n", errno);
         exit(-1);
     }
+#endif
 
-    nr_pages = ram_size / XC_PAGE_SIZE;
-
-    page_array = (xen_pfn_t *)malloc(nr_pages * sizeof(xen_pfn_t));
-    if (page_array == NULL) {
-        fprintf(logfile, "malloc returned error %d\n", errno);
-        exit(-1);
-    }
-
-    for (i = 0; i < nr_pages; i++)
-        page_array[i] = i;
-       
-    /* VTI will not use memory between 3G~4G, so we just pass a legal pfn
-       to make QEMU map continuous virtual memory space */
-    if (ram_size > MMIO_START) {       
-        for (i = 0 ; i < (MEM_G >> XC_PAGE_SHIFT); i++)
-            page_array[(MMIO_START >> XC_PAGE_SHIFT) + i] =
-                (STORE_PAGE_START >> XC_PAGE_SHIFT); 
-    }
-
-    phys_ram_base = xc_map_foreign_batch(xc_handle, domid,
-                                         PROT_READ|PROT_WRITE,
-                                         page_array, nr_pages);
-    if (phys_ram_base == 0) {
-        fprintf(logfile, "xc_map_foreign_batch returned error %d\n", errno);
-        exit(-1);
-    }
-    free(page_array);
-#endif
+    phys_ram_base_reinit();
 
     timeoffset_get();
 
diff -r c364f80eb4b5 tools/ioemu/target-i386-dm/exec-dm.c
--- a/tools/ioemu/target-i386-dm/exec-dm.c      Wed Jan 23 13:27:21 2008 +0000
+++ b/tools/ioemu/target-i386-dm/exec-dm.c      Wed Jan 23 15:37:19 2008 +0000
@@ -411,10 +411,12 @@ int iomem_index(target_phys_addr_t addr)
         return 0;
 }
 
-#if defined(__i386__) || defined(__x86_64__)
+#if defined(__i386__)
 #define phys_ram_addr(x) (qemu_map_cache(x))
+#elif defined(__x86_64__)
+#define phys_ram_addr(x) (((x) < ram_size) ? (phys_ram_base + (x)) : qemu_map_cache(x))
 #elif defined(__ia64__)
-#define phys_ram_addr(x) ((addr < ram_size) ? (phys_ram_base + (x)) : NULL)
+#define phys_ram_addr(x) (((x) < ram_size) ? (phys_ram_base + (x)) : NULL)
 #endif
 
 extern unsigned long *logdirty_bitmap;
diff -r c364f80eb4b5 tools/python/xen/xend/image.py
--- a/tools/python/xen/xend/image.py    Wed Jan 23 13:27:21 2008 +0000
+++ b/tools/python/xen/xend/image.py    Wed Jan 23 15:37:19 2008 +0000
@@ -186,6 +186,9 @@ class ImageHandler:
     # xm config file
     def parseDeviceModelArgs(self, vmConfig):
         ret = ["-domain-name", str(self.vm.info['name_label'])]
+
+        ret.append("-m")
+        ret.append("%s" % (self.getRequiredInitialReservation() / 1024))
 
         # Find RFB console device, and if it exists, make QEMU enable
         # the VNC console.
@@ -565,12 +568,6 @@ class IA64_HVM_ImageHandler(HVMImageHand
         # Explicit shadow memory is not a concept 
         return 0
 
-    def getDeviceModelArgs(self, restore = False):
-        args = HVMImageHandler.getDeviceModelArgs(self, restore)
-        args = args + ([ "-m", "%s" %
-                         (self.getRequiredInitialReservation() / 1024) ])
-        return args
-
 
 class IA64_Linux_ImageHandler(LinuxImageHandler):
 

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel