[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH v2 2/6] ioreq-server: tidy up use of ioreq_t
This patch tidies up various occurences of single element ioreq_t arrays on the stack and improves coding style. Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx> --- xen/arch/x86/hvm/emulate.c | 38 +++++++++++++++++++------------------- xen/arch/x86/hvm/hvm.c | 2 ++ xen/arch/x86/hvm/io.c | 37 +++++++++++++++++-------------------- 3 files changed, 38 insertions(+), 39 deletions(-) diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c index 154d14e..73808f3 100644 --- a/xen/arch/x86/hvm/emulate.c +++ b/xen/arch/x86/hvm/emulate.c @@ -57,7 +57,7 @@ static int hvmemul_do_io( int value_is_ptr = (p_data == NULL); struct vcpu *curr = current; struct hvm_vcpu_io *vio; - ioreq_t p[1]; + ioreq_t p; unsigned long ram_gfn = paddr_to_pfn(ram_gpa); p2m_type_t p2mt; struct page_info *ram_page; @@ -171,39 +171,39 @@ static int hvmemul_do_io( if ( vio->mmio_retrying ) *reps = 1; - p->state = STATE_IOREQ_NONE; - p->dir = dir; - p->data_is_ptr = value_is_ptr; - p->type = is_mmio ? IOREQ_TYPE_COPY : IOREQ_TYPE_PIO; - p->size = size; - p->addr = addr; - p->count = *reps; - p->df = df; - p->data = value; + p.state = STATE_IOREQ_NONE; + p.dir = dir; + p.data_is_ptr = value_is_ptr; + p.type = is_mmio ? 
IOREQ_TYPE_COPY : IOREQ_TYPE_PIO; + p.size = size; + p.addr = addr; + p.count = *reps; + p.df = df; + p.data = value; if ( dir == IOREQ_WRITE ) - hvmtrace_io_assist(is_mmio, p); + hvmtrace_io_assist(is_mmio, &p); if ( is_mmio ) { - rc = hvm_mmio_intercept(p); + rc = hvm_mmio_intercept(&p); if ( rc == X86EMUL_UNHANDLEABLE ) - rc = hvm_buffered_io_intercept(p); + rc = hvm_buffered_io_intercept(&p); } else { - rc = hvm_portio_intercept(p); + rc = hvm_portio_intercept(&p); } switch ( rc ) { case X86EMUL_OKAY: case X86EMUL_RETRY: - *reps = p->count; - p->state = STATE_IORESP_READY; + *reps = p.count; + p.state = STATE_IORESP_READY; if ( !vio->mmio_retry ) { - hvm_io_assist(p); + hvm_io_assist(&p); vio->io_state = HVMIO_none; } else @@ -212,7 +212,7 @@ static int hvmemul_do_io( break; case X86EMUL_UNHANDLEABLE: rc = X86EMUL_RETRY; - if ( !hvm_send_assist_req(curr, p) ) + if ( !hvm_send_assist_req(curr, &p) ) { rc = X86EMUL_OKAY; vio->io_state = HVMIO_none; @@ -234,7 +234,7 @@ static int hvmemul_do_io( finish_access: if ( dir == IOREQ_READ ) - hvmtrace_io_assist(is_mmio, p); + hvmtrace_io_assist(is_mmio, &p); if ( p_data != NULL ) memcpy(p_data, &vio->io_data, size); diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c index b8bf225..e07cae3 100644 --- a/xen/arch/x86/hvm/hvm.c +++ b/xen/arch/x86/hvm/hvm.c @@ -349,7 +349,9 @@ static ioreq_t *get_ioreq(struct vcpu *v) { struct domain *d = v->domain; shared_iopage_t *p = d->arch.hvm_domain.ioreq.va; + ASSERT((v == current) || spin_is_locked(&d->arch.hvm_domain.ioreq.lock)); + return p ? 
&p->vcpu_ioreq[v->vcpu_id] : NULL; } diff --git a/xen/arch/x86/hvm/io.c b/xen/arch/x86/hvm/io.c index 576641c..c9adb94 100644 --- a/xen/arch/x86/hvm/io.c +++ b/xen/arch/x86/hvm/io.c @@ -48,22 +48,19 @@ void send_timeoffset_req(unsigned long timeoff) { - ioreq_t p[1]; + ioreq_t p = { + .type = IOREQ_TYPE_TIMEOFFSET, + .size = 8, + .count = 1, + .dir = IOREQ_WRITE, + .data = timeoff, + .state = STATE_IOREQ_READY, + }; if ( timeoff == 0 ) return; - memset(p, 0, sizeof(*p)); - - p->type = IOREQ_TYPE_TIMEOFFSET; - p->size = 8; - p->count = 1; - p->dir = IOREQ_WRITE; - p->data = timeoff; - - p->state = STATE_IOREQ_READY; - - if ( !hvm_buffered_io_send(p) ) + if ( !hvm_buffered_io_send(&p) ) printk("Unsuccessful timeoffset update\n"); } @@ -71,14 +68,14 @@ void send_timeoffset_req(unsigned long timeoff) void send_invalidate_req(void) { struct vcpu *v = current; - ioreq_t p[1]; - - p->type = IOREQ_TYPE_INVALIDATE; - p->size = 4; - p->dir = IOREQ_WRITE; - p->data = ~0UL; /* flush all */ - - (void)hvm_send_assist_req(v, p); + ioreq_t p = { + .type = IOREQ_TYPE_INVALIDATE, + .size = 4, + .dir = IOREQ_WRITE, + .data = ~0UL, /* flush all */ + }; + + (void)hvm_send_assist_req(v, &p); } int handle_mmio(void) -- 1.7.10.4 _______________________________________________ Xen-devel mailing list Xen-devel@xxxxxxxxxxxxx http://lists.xen.org/xen-devel
Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.