
[Xen-devel] [RFC QEMU PATCH 7/8] xen-hvm: create hotplug memory region for HVM guest



Reserve the address space after guest physical memory for the hotplug
memory region, which the existing memory hotplug implementation uses to
place NVDIMM devices. In addition, skip vmstate_register_ram() in the
pc-dimm plug path when running on Xen, since Xen rather than QEMU
manages the saving and restoring of guest RAM.
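
For illustration, a minimal standalone sketch of the base/size
arithmetic performed by the xen_ram_init() hunk below. It is not part
of the patch; the concrete sizes are made-up example inputs, and the
generic ROUND_UP here is a stand-in for QEMU's macro:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define GiB (1ULL << 30)
    /* generic round-up to a multiple of d (stand-in for QEMU's ROUND_UP) */
    #define ROUND_UP(n, d) (((n) + (d) - 1) / (d) * (d))

    int main(void)
    {
        uint64_t above_4g_mem_size = 1 * GiB; /* RAM already above 4GiB */
        uint64_t ram_size          = 4 * GiB; /* -m 4G */
        uint64_t maxram_size       = 8 * GiB; /* maxmem=8G */
        uint64_t ram_slots         = 2;       /* slots=2 */

        /* hotplug region starts at the first GiB boundary above guest RAM */
        uint64_t base = ROUND_UP(0x100000000ULL + above_4g_mem_size, GiB);
        /* worst case: each DIMM slot needs up to 1GiB of alignment padding */
        uint64_t size = (maxram_size - ram_size) + GiB * ram_slots;

        printf("base 0x%" PRIx64 " size 0x%" PRIx64 "\n", base, size);
        return 0;
    }

With these example inputs the region starts at 0x140000000 (5GiB) and
spans 6GiB: the 4GiB of hotpluggable memory plus 1GiB of potential
alignment padding per slot.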

Signed-off-by: Haozhong Zhang <haozhong.zhang@xxxxxxxxx>
---
Cc: "Michael S. Tsirkin" <mst@xxxxxxxxxx>
Cc: Igor Mammedov <imammedo@xxxxxxxxxx>
Cc: Stefano Stabellini <sstabellini@xxxxxxxxxx>
Cc: Anthony Perard <anthony.perard@xxxxxxxxxx>
Cc: xen-devel@xxxxxxxxxxxxxxxxxxx
---
 hw/mem/pc-dimm.c |  5 ++++-
 xen-hvm.c        | 36 ++++++++++++++++++++++++++++++++++++
 2 files changed, 40 insertions(+), 1 deletion(-)

diff --git a/hw/mem/pc-dimm.c b/hw/mem/pc-dimm.c
index 9e8dab0..69c5784 100644
--- a/hw/mem/pc-dimm.c
+++ b/hw/mem/pc-dimm.c
@@ -28,6 +28,7 @@
 #include "sysemu/kvm.h"
 #include "trace.h"
 #include "hw/virtio/vhost.h"
+#include "hw/xen/xen.h"
 
 typedef struct pc_dimms_capacity {
      uint64_t size;
@@ -107,7 +108,9 @@ void pc_dimm_memory_plug(DeviceState *dev, MemoryHotplugState *hpms,
     }
 
     memory_region_add_subregion(&hpms->mr, addr - hpms->base, mr);
-    vmstate_register_ram(vmstate_mr, dev);
+    if (!xen_enabled()) {
+        vmstate_register_ram(vmstate_mr, dev);
+    }
     numa_set_mem_node_id(addr, memory_region_size(mr), dimm->node);
 
 out:
diff --git a/xen-hvm.c b/xen-hvm.c
index 768c4c2..68833db 100644
--- a/xen-hvm.c
+++ b/xen-hvm.c
@@ -25,6 +25,7 @@
 #include "sysemu/xen-mapcache.h"
 #include "trace.h"
 #include "exec/address-spaces.h"
+#include "exec/ram_addr.h"
 
 #include <xen/hvm/ioreq.h>
 #include <xen/hvm/params.h>
@@ -201,6 +202,8 @@ static void xen_ram_init(PCMachineState *pcms,
     uint64_t user_lowmem = object_property_get_int(qdev_get_machine(),
                                                    PC_MACHINE_MAX_RAM_BELOW_4G,
                                                    &error_abort);
+    MachineState *machine = MACHINE(pcms);
+    PCMachineClass *pcmc = PC_MACHINE_GET_CLASS(pcms);
 
     /* Handle the machine opt max-ram-below-4g.  It is basically doing
      * min(xen limit, user limit).
@@ -252,6 +255,39 @@ static void xen_ram_init(PCMachineState *pcms,
                                  pcms->above_4g_mem_size);
         memory_region_add_subregion(sysmem, 0x100000000ULL, &ram_hi);
     }
+
+    /* reserve hotplug memory region for vNVDIMM */
+    if (pcmc->has_reserved_memory &&
+        (machine->ram_size < machine->maxram_size)) {
+        ram_addr_t hotplug_mem_size = machine->maxram_size - machine->ram_size;
+
+        if (QEMU_ALIGN_UP(machine->maxram_size,
+                          TARGET_PAGE_SIZE) != machine->maxram_size) {
+            error_report("maximum memory size must be aligned to a multiple "
+                         "of %d bytes", TARGET_PAGE_SIZE);
+            exit(EXIT_FAILURE);
+        }
+
+        pcms->hotplug_memory.base =
+            ROUND_UP(0x100000000ULL + pcms->above_4g_mem_size, 1ULL << 30);
+
+        if (pcmc->enforce_aligned_dimm) {
+            /* size hotplug region assuming 1G page max alignment per slot */
+            hotplug_mem_size += (1ULL << 30) * machine->ram_slots;
+        }
+
+        if ((pcms->hotplug_memory.base + hotplug_mem_size) <
+            hotplug_mem_size) {
+            error_report("unsupported amount of maximum memory: " RAM_ADDR_FMT,
+                         machine->maxram_size);
+            exit(EXIT_FAILURE);
+        }
+
+        memory_region_init(&pcms->hotplug_memory.mr, OBJECT(pcms),
+                           "hotplug-memory", hotplug_mem_size);
+        memory_region_add_subregion(sysmem, pcms->hotplug_memory.base,
+                                    &pcms->hotplug_memory.mr);
+    }
 }
 
 void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size, MemoryRegion *mr,
-- 
2.10.1
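
A side note on the wrap-around test in the xen-hvm.c hunk: with
unsigned 64-bit arithmetic, "base + size < size" is true exactly when
the sum overflows, because a non-overflowing sum can never be smaller
than either operand. A minimal standalone sketch (the values are
made-up examples, not taken from the patch):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t base = UINT64_MAX - (1ULL << 30); /* pathological base */
        uint64_t size = 4ULL << 30;                /* 4GiB region */

        /* the addition wraps, so the result ends up below size itself --
         * exactly the condition the patch rejects with error_report() */
        if (base + size < size) {
            printf("overflow detected\n");
        }
        return 0;
    }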

