[Xen-devel] [PATCH v2 1/1] xen-hvm.c: Add support for Xen access to vmport
This adds synchronisation of the 6 vcpu registers (only 32 bits of
them) that vmport.c needs between Xen and QEMU.  This avoids a 2nd and
3rd exchange between QEMU and Xen to fetch and put these 6 vcpu
registers used by the code in vmport.c and vmmouse.c.

Add a new array to XenIOState that allows selection of current_cpu by
ioreq_id.  Pass XenIOState to handle_ioreq().  Add the new routines
regs_to_cpu(), regs_from_cpu(), and handle_vmport_ioreq().

Signed-off-by: Don Slutz <dslutz@xxxxxxxxxxx>
---
v2: More info in commit message.
    Added vmware_ioreq_t.
    Added cpu_by_ioreq_id.
    Set current_cpu in regs_to_cpu(), clear in regs_from_cpu().
    Drop all changes to vmport.c.

 xen-hvm.c | 152 +++++++++++++++++++++++++++++++++++++++++++++++++++++++-------
 1 file changed, 137 insertions(+), 15 deletions(-)

diff --git a/xen-hvm.c b/xen-hvm.c
index 05e522c..d80f21c 100644
--- a/xen-hvm.c
+++ b/xen-hvm.c
@@ -41,6 +41,31 @@ static MemoryRegion *framebuffer;
 static bool xen_in_migration;
 
 /* Compatibility with older version */
+
+/* This allows QEMU to build on a system that has Xen 4.5 or earlier
+ * installed.  This is here (not in hw/xen/xen_common.h) because
+ * xen/hvm/ioreq.h needs to be included before this block and
+ * hw/xen/xen_common.h needs to be included before xen/hvm/ioreq.h.
+ */
+#ifndef IOREQ_TYPE_VMWARE_PORT
+#define IOREQ_TYPE_VMWARE_PORT 3
+struct vmware_ioreq {
+    uint32_t esi;
+    uint32_t edi;
+    uint32_t eax;
+    uint32_t ebx;
+    uint32_t ecx;
+    uint32_t edx;
+    uint32_t vp_eport; /* evtchn for notifications to/from device model */
+    uint16_t addr;
+    uint8_t state:4;
+    uint8_t dir:1;     /* 1=read, 0=write */
+    uint8_t size:3;
+    uint8_t type;      /* I/O type */
+};
+typedef struct vmware_ioreq vmware_ioreq_t;
+#endif
+
 #if __XEN_LATEST_INTERFACE_VERSION__ < 0x0003020a
 static inline uint32_t xen_vcpu_eport(shared_iopage_t *shared_page, int i)
 {
@@ -81,6 +106,7 @@ typedef struct XenIOState {
     shared_iopage_t *shared_page;
     buffered_iopage_t *buffered_io_page;
     QEMUTimer *buffered_io_timer;
+    CPUState **cpu_by_ioreq_id;
     /* the evtchn port for polling the notification, */
     evtchn_port_t *ioreq_local_port;
     /* evtchn local port for buffered io */
@@ -101,6 +127,8 @@ typedef struct XenIOState {
     Notifier wakeup;
 } XenIOState;
 
+static void handle_ioreq(XenIOState *state, ioreq_t *req);
+
 /* Xen specific function for piix pci */
 int xen_pci_slot_get_pirq(PCIDevice *pci_dev, int irq_num)
 {
@@ -596,11 +624,23 @@ static ioreq_t *cpu_get_ioreq_from_shared_memory(XenIOState *state, int vcpu)
     ioreq_t *req = xen_vcpu_ioreq(state->shared_page, vcpu);
 
     if (req->state != STATE_IOREQ_READY) {
-        DPRINTF("I/O request not ready: "
-                "%x, ptr: %x, port: %"PRIx64", "
-                "data: %"PRIx64", count: %" FMT_ioreq_size ", size: %" FMT_ioreq_size "\n",
-                req->state, req->data_is_ptr, req->addr,
-                req->data, req->count, req->size);
+        if (req->type != IOREQ_TYPE_VMWARE_PORT) {
+            DPRINTF("I/O request not ready: "
+                    "%x, ptr: %x, port: %"PRIx64", "
+                    "data: %"PRIx64", count: %" FMT_ioreq_size ", size: %"
+                    FMT_ioreq_size "\n",
+                    req->state, req->data_is_ptr, req->addr,
+                    req->data, req->count, req->size);
+        } else {
+#ifdef DEBUG_XEN_HVM
+            vmware_ioreq_t *vp = (vmware_ioreq_t *)req;
+            DPRINTF("I/O VMware request not ready: "
+                    "%x, ptr: 0, port: %x, data: %x"
+                    ", count: 1, size: %d\n",
+                    vp->state, (uint16_t)vp->edx, vp->eax,
+                    vp->size);
+#endif
+        }
         return NULL;
     }

@@ -773,10 +813,73 @@ static void cpu_ioreq_move(ioreq_t *req)
     }
 }
 
-static void handle_ioreq(ioreq_t *req)
+static void regs_to_cpu(XenIOState *state, vmware_ioreq_t *vmport_req)
+{
+    X86CPU *cpu;
+    CPUX86State *env;
+
+    if (!state->cpu_by_ioreq_id[0]) {
+        CPUState *cpu_state;
+
+        CPU_FOREACH(cpu_state) {
+            state->cpu_by_ioreq_id[cpu_state->cpu_index] = cpu_state;
+        }
+    }
+    current_cpu = state->cpu_by_ioreq_id[state->send_vcpu];
+    cpu = X86_CPU(current_cpu);
+    env = &cpu->env;
+    env->regs[R_EAX] = vmport_req->eax;
+    env->regs[R_EBX] = vmport_req->ebx;
+    env->regs[R_ECX] = vmport_req->ecx;
+    env->regs[R_EDX] = vmport_req->edx;
+    env->regs[R_ESI] = vmport_req->esi;
+    env->regs[R_EDI] = vmport_req->edi;
+}
+
+static void regs_from_cpu(XenIOState *state, vmware_ioreq_t *vmport_req,
+                          ioreq_t *req)
+{
+    X86CPU *cpu = X86_CPU(current_cpu);
+    CPUX86State *env = &cpu->env;
+
+    assert(sizeof(*vmport_req) == sizeof(*req));
+    assert(offsetof(ioreq_t, type) == offsetof(vmware_ioreq_t, type));
+    assert(offsetof(ioreq_t, vp_eport) == offsetof(vmware_ioreq_t, vp_eport));
+
+    vmport_req->eax = env->regs[R_EAX];
+    vmport_req->ebx = env->regs[R_EBX];
+    vmport_req->ecx = env->regs[R_ECX];
+    vmport_req->edx = env->regs[R_EDX];
+    vmport_req->esi = env->regs[R_ESI];
+    vmport_req->edi = env->regs[R_EDI];
+    current_cpu = NULL;
+}
+
+static void handle_vmport_ioreq(XenIOState *state, vmware_ioreq_t *vmport_req)
+{
+    ioreq_t req;
+
+    memset(&req, 0x00, sizeof(req));
+
+    req.size = vmport_req->size;
+    req.count = 1;
+    req.addr = vmport_req->addr;
+    req.data = vmport_req->eax;
+    req.state = STATE_IOREQ_READY;
+    req.dir = vmport_req->dir;
+    req.df = 0;
+    req.type = IOREQ_TYPE_PIO;
+    req.data_is_ptr = 0;
+
+    regs_to_cpu(state, vmport_req);
+    handle_ioreq(state, &req);
+    regs_from_cpu(state, vmport_req, &req);
+}
+
+static void handle_ioreq(XenIOState *state, ioreq_t *req)
 {
-    if (!req->data_is_ptr && (req->dir == IOREQ_WRITE) &&
-        (req->size < sizeof (target_ulong))) {
+    if ((req->type != IOREQ_TYPE_VMWARE_PORT) && !req->data_is_ptr &&
+        (req->dir == IOREQ_WRITE) && (req->size < sizeof(target_ulong))) {
         req->data &= ((target_ulong) 1 << (8 * req->size)) - 1;
     }

@@ -787,6 +890,9 @@ static void handle_ioreq(ioreq_t *req)
     case IOREQ_TYPE_COPY:
         cpu_ioreq_move(req);
         break;
+    case IOREQ_TYPE_VMWARE_PORT:
+        handle_vmport_ioreq(state, (vmware_ioreq_t *)req);
+        break;
     case IOREQ_TYPE_TIMEOFFSET:
         break;
     case IOREQ_TYPE_INVALIDATE:
@@ -828,7 +934,7 @@ static int handle_buffered_iopage(XenIOState *state)
             req.data |= ((uint64_t)buf_req->data) << 32;
         }
 
-        handle_ioreq(&req);
+        handle_ioreq(state, &req);
 
         xen_mb();
         state->buffered_io_page->read_pointer += qw ? 2 : 1;
@@ -857,14 +963,27 @@ static void cpu_handle_ioreq(void *opaque)
     handle_buffered_iopage(state);
     if (req) {
-        handle_ioreq(req);
+        handle_ioreq(state, req);
 
         if (req->state != STATE_IOREQ_INPROCESS) {
-            fprintf(stderr, "Badness in I/O request ... not in service?!: "
-                    "%x, ptr: %x, port: %"PRIx64", "
-                    "data: %"PRIx64", count: %" FMT_ioreq_size ", size: %" FMT_ioreq_size "\n",
-                    req->state, req->data_is_ptr, req->addr,
-                    req->data, req->count, req->size);
+            if (req->type != IOREQ_TYPE_VMWARE_PORT) {
+                fprintf(stderr,
+                        "Badness in I/O request ... not in service?!: "
+                        "%x, ptr: %x, port: %"PRIx64", "
+                        "data: %"PRIx64", count: %" FMT_ioreq_size
+                        ", size: %" FMT_ioreq_size
+                        ", type: %"FMT_ioreq_size"\n",
+                        req->state, req->data_is_ptr, req->addr,
+                        req->data, req->count, req->size, req->type);
+            } else {
+                vmware_ioreq_t *vp = (vmware_ioreq_t *)req;
+                fprintf(stderr,
+                        "Badness in I/O VMware request ... not in service?!: "
+                        "%x, ptr: 0, port: %x, data: %x"
+                        ", count: 1, size: %d\n",
+                        vp->state, (uint16_t)vp->edx, vp->eax,
+                        vp->size);
+            }
             destroy_hvm_domain(false);
             return;
         }
@@ -1028,6 +1147,9 @@ int xen_hvm_init(ram_addr_t *below_4g_mem_size, ram_addr_t *above_4g_mem_size,
         hw_error("map buffered IO page returned error %d", errno);
     }
 
+    /* Note: cpus is empty at this point in init */
+    state->cpu_by_ioreq_id = g_malloc0(max_cpus * sizeof(CPUState *));
+
     state->ioreq_local_port = g_malloc0(max_cpus * sizeof (evtchn_port_t));
 
     /* FIXME: how about if we overflow the page here? */
-- 
1.8.4
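
For reviewers who have not looked at the vmport protocol before, below
is a rough sketch (not part of this patch) of the guest-side backdoor
call that IOREQ_TYPE_VMWARE_PORT carries.  The magic number, I/O port,
and GETVERSION command number are the well-known VMware backdoor
constants; the wrapper names vmport_call() and vmport_get_version() are
made up for illustration, and the inline asm assumes an x86 guest built
with GCC or Clang.

#include <stdint.h>

#define VMPORT_MAGIC          0x564D5868u /* "VMXh" */
#define VMPORT_PORT           0x5658u     /* backdoor I/O port */
#define VMPORT_CMD_GETVERSION 10u

struct vmport_regs {
    uint32_t eax, ebx, ecx, edx, esi, edi;
};

/* One backdoor transaction: a 32-bit IN from port 0x5658.  The command
 * and its arguments travel in EAX/EBX/ECX/EDX/ESI/EDI and the results
 * come back in the same six registers, which is why vmware_ioreq_t
 * above has to carry all of them between Xen and QEMU. */
static inline void vmport_call(struct vmport_regs *r)
{
    __asm__ __volatile__("inl %%dx, %%eax"
                         : "+a"(r->eax), "+b"(r->ebx), "+c"(r->ecx),
                           "+d"(r->edx), "+S"(r->esi), "+D"(r->edi));
}

/* Example: the GETVERSION command commonly used to probe for the port. */
static uint32_t vmport_get_version(void)
{
    struct vmport_regs r = {
        .eax = VMPORT_MAGIC,
        .ebx = ~VMPORT_MAGIC,         /* anything but the magic */
        .ecx = VMPORT_CMD_GETVERSION,
        .edx = VMPORT_PORT,
    };

    vmport_call(&r);
    return r.eax;   /* version; r.ebx == VMPORT_MAGIC on success */
}

Without this patch, QEMU would need extra exchanges with Xen to fetch
EBX/ECX/EDX/ESI/EDI before handing such a request to vmport.c and to
write them back afterwards; with it, all six values ride along in the
vmware_ioreq_t itself.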