[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH 1/1] xen-hvm.c: Add support for Xen access to vmport



This adds support for handling IOREQ_TYPE_VMWARE_PORT requests in
QEMU's xen-hvm ioreq loop. For such requests the guest's vcpu
registers (EAX, EBX, ECX, EDX, ESI, EDI) are copied from the ioreq
into a QEMU-side CPUX86State before dispatching to the existing
vmport emulation, and copied back afterwards, so the vmport and
vmmouse code sees the register state Xen captured for the vcpu.

Signed-off-by: Don Slutz <dslutz@xxxxxxxxxxx>
---
 hw/misc/vmport.c     | 32 ++++++++++++++++++++------------
 include/hw/xen/xen.h |  6 ++++++
 vl.c                 |  1 +
 xen-hvm.c            | 38 ++++++++++++++++++++++++++++++++++++--
 4 files changed, 63 insertions(+), 14 deletions(-)

diff --git a/hw/misc/vmport.c b/hw/misc/vmport.c
index cd5716a..f984b51 100644
--- a/hw/misc/vmport.c
+++ b/hw/misc/vmport.c
@@ -26,6 +26,7 @@
 #include "hw/i386/pc.h"
 #include "sysemu/kvm.h"
 #include "hw/qdev.h"
+#include "hw/xen/xen.h"
 
 //#define VMPORT_DEBUG
 
@@ -49,6 +50,16 @@ typedef struct VMPortState
 
 static VMPortState *port_state;
 
+static CPUX86State *vmport_get_env(CPUState *cs)
+{
+    X86CPU *cpu = X86_CPU(cs);
+
+    if (xen_enabled()) {
+        return xen_vmport_env();
+    }
+    return &cpu->env;
+}
+
 void vmport_register(unsigned char command, VMPortReadFunc *func, void *opaque)
 {
     if (command >= VMPORT_ENTRIES)
@@ -63,8 +74,7 @@ static uint64_t vmport_ioport_read(void *opaque, hwaddr addr,
 {
     VMPortState *s = opaque;
     CPUState *cs = current_cpu;
-    X86CPU *cpu = X86_CPU(cs);
-    CPUX86State *env = &cpu->env;
+    CPUX86State *env = vmport_get_env(cs);
     unsigned char command;
     uint32_t eax;
 
@@ -91,32 +101,31 @@ static uint64_t vmport_ioport_read(void *opaque, hwaddr addr,
 static void vmport_ioport_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
 {
-    X86CPU *cpu = X86_CPU(current_cpu);
+    CPUX86State *env = vmport_get_env(current_cpu);
 
-    cpu->env.regs[R_EAX] = vmport_ioport_read(opaque, addr, 4);
+    env->regs[R_EAX] = vmport_ioport_read(opaque, addr, 4);
 }
 
 static uint32_t vmport_cmd_get_version(void *opaque, uint32_t addr)
 {
-    X86CPU *cpu = X86_CPU(current_cpu);
+    CPUX86State *env = vmport_get_env(current_cpu);
 
-    cpu->env.regs[R_EBX] = VMPORT_MAGIC;
+    env->regs[R_EBX] = VMPORT_MAGIC;
     return 6;
 }
 
 static uint32_t vmport_cmd_ram_size(void *opaque, uint32_t addr)
 {
-    X86CPU *cpu = X86_CPU(current_cpu);
+    CPUX86State *env = vmport_get_env(current_cpu);
 
-    cpu->env.regs[R_EBX] = 0x1177;
+    env->regs[R_EBX] = 0x1177;
     return ram_size;
 }
 
 /* vmmouse helpers */
 void vmmouse_get_data(uint32_t *data)
 {
-    X86CPU *cpu = X86_CPU(current_cpu);
-    CPUX86State *env = &cpu->env;
+    CPUX86State *env = vmport_get_env(current_cpu);
 
     data[0] = env->regs[R_EAX]; data[1] = env->regs[R_EBX];
     data[2] = env->regs[R_ECX]; data[3] = env->regs[R_EDX];
@@ -125,8 +134,7 @@ void vmmouse_get_data(uint32_t *data)
 
 void vmmouse_set_data(const uint32_t *data)
 {
-    X86CPU *cpu = X86_CPU(current_cpu);
-    CPUX86State *env = &cpu->env;
+    CPUX86State *env = vmport_get_env(current_cpu);
 
     env->regs[R_EAX] = data[0]; env->regs[R_EBX] = data[1];
     env->regs[R_ECX] = data[2]; env->regs[R_EDX] = data[3];
diff --git a/include/hw/xen/xen.h b/include/hw/xen/xen.h
index f71f2d8..8ea328c 100644
--- a/include/hw/xen/xen.h
+++ b/include/hw/xen/xen.h
@@ -22,12 +22,18 @@ extern uint32_t xen_domid;
 extern enum xen_mode xen_mode;
 
 extern bool xen_allowed;
+extern void *xen_opaque_env;
 
 static inline bool xen_enabled(void)
 {
     return xen_allowed;
 }
 
+static inline void *xen_vmport_env(void)
+{
+    return xen_opaque_env;
+}
+
 int xen_pci_slot_get_pirq(PCIDevice *pci_dev, int irq_num);
 void xen_piix3_set_irq(void *opaque, int irq_num, int level);
 void xen_piix_pci_write_config_client(uint32_t address, uint32_t val, int len);
diff --git a/vl.c b/vl.c
index dbdca59..443a9f5 100644
--- a/vl.c
+++ b/vl.c
@@ -215,6 +215,7 @@ static NotifierList machine_init_done_notifiers =
 static bool tcg_allowed = true;
 bool xen_allowed;
 uint32_t xen_domid;
+void *xen_opaque_env;
 enum xen_mode xen_mode = XEN_EMULATE;
 static int tcg_tb_size;
 
diff --git a/xen-hvm.c b/xen-hvm.c
index 05e522c..e1274bb 100644
--- a/xen-hvm.c
+++ b/xen-hvm.c
@@ -857,14 +857,48 @@ static void cpu_handle_ioreq(void *opaque)
 
     handle_buffered_iopage(state);
     if (req) {
+#ifdef IOREQ_TYPE_VMWARE_PORT
+        if (req->type == IOREQ_TYPE_VMWARE_PORT) {
+            CPUX86State *env;
+            ioreq_t fake_req = {
+                .type = IOREQ_TYPE_PIO,
+                .addr = (uint16_t)req->size,
+                .size = 4,
+                .dir = IOREQ_READ,
+                .df = 0,
+                .data_is_ptr = 0,
+            };
+            if (!xen_opaque_env) {
+                xen_opaque_env = g_malloc(sizeof(CPUX86State));
+            }
+            env = xen_opaque_env;
+            env->regs[R_EAX] = (uint32_t)(req->addr >> 32);
+            env->regs[R_EBX] = (uint32_t)(req->addr);
+            env->regs[R_ECX] = req->count;
+            env->regs[R_EDX] = req->size;
+            env->regs[R_ESI] = (uint32_t)(req->data >> 32);
+            env->regs[R_EDI] = (uint32_t)(req->data);
+            handle_ioreq(&fake_req);
+            req->addr = ((uint64_t)fake_req.data << 32) |
+                (uint32_t)env->regs[R_EBX];
+            req->count = env->regs[R_ECX];
+            req->size = env->regs[R_EDX];
+            req->data = ((uint64_t)env->regs[R_ESI] << 32) |
+                (uint32_t)env->regs[R_EDI];
+        } else {
+            handle_ioreq(req);
+        }
+#else
         handle_ioreq(req);
+#endif
 
         if (req->state != STATE_IOREQ_INPROCESS) {
             fprintf(stderr, "Badness in I/O request ... not in service?!: "
                     "%x, ptr: %x, port: %"PRIx64", "
-                    "data: %"PRIx64", count: %" FMT_ioreq_size ", size: %" FMT_ioreq_size "\n",
+                    "data: %"PRIx64", count: %" FMT_ioreq_size
+                    ", size: %" FMT_ioreq_size ", type: %"FMT_ioreq_size"\n",
                     req->state, req->data_is_ptr, req->addr,
-                    req->data, req->count, req->size);
+                    req->data, req->count, req->size, req->type);
             destroy_hvm_domain(false);
             return;
         }
-- 
1.8.4


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.