[Xen-devel] [PATCH v7 12/15] x86/hvm: remove hvm_io_state enumeration
Emulation request status is already covered by the STATE_IOREQ_XXX values, so
just use those (see the sketch of those states after the mapping below). The
mapping is:
HVMIO_none -> STATE_IOREQ_NONE
HVMIO_awaiting_completion -> STATE_IOREQ_READY
HVMIO_completed -> STATE_IORESP_READY
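
For context, a minimal sketch of the STATE_IOREQ_XXX states being reused here,
assuming the definitions in the public ioreq interface
(xen/include/public/hvm/ioreq.h); the numeric values shown are what that header
is commonly expected to define and should be checked against the tree in use:

    /* Sketch only -- see xen/include/public/hvm/ioreq.h for the
     * authoritative definitions; the values below are assumptions. */
    #define STATE_IOREQ_NONE        0  /* no request outstanding             */
    #define STATE_IOREQ_READY       1  /* request ready for an emulator      */
    #define STATE_IOREQ_INPROCESS   2  /* emulator is processing the request */
    #define STATE_IORESP_READY      3  /* response ready for the guest       */

Reusing these states means vio->io_state can be compared directly against the
same constants used for the shared ioreq pages, instead of maintaining a
parallel enumeration.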
Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
Cc: Keir Fraser <keir@xxxxxxx>
Acked-by: Jan Beulich <jbeulich@xxxxxxxx>
Reviewed-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
v7:
- Modified struct field placement as knock-on from previous patch
v6:
- Added Andrew's reviewed-by
v5:
- Added Jan's acked-by
---
xen/arch/x86/hvm/emulate.c | 14 +++++++-------
xen/arch/x86/hvm/hvm.c | 6 +++---
xen/arch/x86/hvm/svm/nestedsvm.c | 2 +-
xen/arch/x86/hvm/vmx/realmode.c | 4 ++--
xen/include/asm-x86/hvm/vcpu.h | 10 ++--------
5 files changed, 15 insertions(+), 21 deletions(-)
diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index 73cce4e..9d9219e 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -130,10 +130,10 @@ static int hvmemul_do_io(
switch ( vio->io_state )
{
- case HVMIO_none:
+ case STATE_IOREQ_NONE:
break;
- case HVMIO_completed:
- vio->io_state = HVMIO_none;
+ case STATE_IORESP_READY:
+ vio->io_state = STATE_IOREQ_NONE;
if ( data_is_addr || dir == IOREQ_WRITE )
return X86EMUL_UNHANDLEABLE;
goto finish_access;
@@ -141,7 +141,7 @@ static int hvmemul_do_io(
return X86EMUL_UNHANDLEABLE;
}
- vio->io_state = HVMIO_awaiting_completion;
+ vio->io_state = STATE_IOREQ_READY;
vio->io_size = size;
vio->io_dir = dir;
vio->io_data_is_addr = data_is_addr;
@@ -160,7 +160,7 @@ static int hvmemul_do_io(
{
case X86EMUL_OKAY:
vio->io_data = p.data;
- vio->io_state = HVMIO_none;
+ vio->io_state = STATE_IOREQ_NONE;
break;
case X86EMUL_UNHANDLEABLE:
{
@@ -173,13 +173,13 @@ static int hvmemul_do_io(
rc = hvm_process_io_intercept(&null_handler, &p);
if ( rc == X86EMUL_OKAY )
vio->io_data = p.data;
- vio->io_state = HVMIO_none;
+ vio->io_state = STATE_IOREQ_NONE;
}
else
{
rc = hvm_send_assist_req(s, &p);
if ( rc != X86EMUL_RETRY || curr->domain->is_shutting_down )
- vio->io_state = HVMIO_none;
+ vio->io_state = STATE_IOREQ_NONE;
else if ( data_is_addr || dir == IOREQ_WRITE )
rc = X86EMUL_OKAY;
}
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 8e487d4..3be17f9 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -421,11 +421,11 @@ static void hvm_io_assist(ioreq_t *p)
if ( hvm_vcpu_io_need_completion(vio) )
{
- vio->io_state = HVMIO_completed;
+ vio->io_state = STATE_IORESP_READY;
vio->io_data = p->data;
}
else
- vio->io_state = HVMIO_none;
+ vio->io_state = STATE_IOREQ_NONE;
msix_write_completion(curr);
vcpu_end_shutdown_deferral(curr);
@@ -505,7 +505,7 @@ void hvm_do_resume(struct vcpu *v)
guest_cpu_user_regs()->rax = (uint32_t)vio->io_data;
else
memcpy(&guest_cpu_user_regs()->rax, &vio->io_data, vio->io_size);
- vio->io_state = HVMIO_none;
+ vio->io_state = STATE_IOREQ_NONE;
break;
case HVMIO_realmode_completion:
{
diff --git a/xen/arch/x86/hvm/svm/nestedsvm.c b/xen/arch/x86/hvm/svm/nestedsvm.c
index ad8afb4..2243873 100644
--- a/xen/arch/x86/hvm/svm/nestedsvm.c
+++ b/xen/arch/x86/hvm/svm/nestedsvm.c
@@ -1221,7 +1221,7 @@ enum hvm_intblk nsvm_intr_blocked(struct vcpu *v)
* Delay the injection because this would result in delivering
* an interrupt *within* the execution of an instruction.
*/
- if ( v->arch.hvm_vcpu.hvm_io.io_state != HVMIO_none )
+ if ( v->arch.hvm_vcpu.hvm_io.io_state != STATE_IOREQ_NONE )
return hvm_intblk_shadow;
if ( !nv->nv_vmexit_pending && n2vmcb->exitintinfo.bytes != 0 ) {
diff --git a/xen/arch/x86/hvm/vmx/realmode.c b/xen/arch/x86/hvm/vmx/realmode.c
index deb53ae..25533dc 100644
--- a/xen/arch/x86/hvm/vmx/realmode.c
+++ b/xen/arch/x86/hvm/vmx/realmode.c
@@ -205,7 +205,7 @@ void vmx_realmode(struct cpu_user_regs *regs)
vmx_realmode_emulate_one(&hvmemul_ctxt);
- if ( vio->io_state != HVMIO_none || vio->mmio_retry )
+ if ( vio->io_state != STATE_IOREQ_NONE || vio->mmio_retry )
break;
/* Stop emulating unless our segment state is not safe */
@@ -219,7 +219,7 @@ void vmx_realmode(struct cpu_user_regs *regs)
}
/* Need to emulate next time if we've started an IO operation */
- if ( vio->io_state != HVMIO_none )
+ if ( vio->io_state != STATE_IOREQ_NONE )
curr->arch.hvm_vmx.vmx_emulate = 1;
if ( !curr->arch.hvm_vmx.vmx_emulate && !curr->arch.hvm_vmx.vmx_realmode )
diff --git a/xen/include/asm-x86/hvm/vcpu.h b/xen/include/asm-x86/hvm/vcpu.h
index 6bb9c12..5c9faf2 100644
--- a/xen/include/asm-x86/hvm/vcpu.h
+++ b/xen/include/asm-x86/hvm/vcpu.h
@@ -30,12 +30,6 @@
#include <asm/hvm/svm/nestedsvm.h>
#include <asm/mtrr.h>
-enum hvm_io_state {
- HVMIO_none = 0,
- HVMIO_awaiting_completion,
- HVMIO_completed
-};
-
enum hvm_io_completion {
HVMIO_no_completion,
HVMIO_mmio_completion,
@@ -50,10 +44,10 @@ struct hvm_vcpu_asid {
struct hvm_vcpu_io {
/* I/O request in flight to device model. */
- enum hvm_io_state io_state;
enum hvm_io_completion io_completion;
unsigned long io_data;
unsigned int io_size;
+ uint8_t io_state;
uint8_t io_dir;
uint8_t io_data_is_addr;
@@ -90,7 +84,7 @@ struct hvm_vcpu_io {
static inline bool_t hvm_vcpu_io_need_completion(const struct hvm_vcpu_io *vio)
{
- return (vio->io_state == HVMIO_awaiting_completion) &&
+ return (vio->io_state == STATE_IOREQ_READY) &&
!vio->io_data_is_addr &&
(vio->io_dir == IOREQ_READ);
}
--
1.7.10.4