|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH v3 02/18] x86/hvm: remove hvm_io_pending() check in hvmemul_do_io()
The check is done at the wrong point (since it is irrelevant if the
I/O is to be handled by the hypervisor) and its functionality can be
covered by returning X86EMUL_UNHANDLEABLE from hvm_send_assist_req()
instead.
This patch also removes the domain_crash() call from
hvm_send_assist_req(). Returning X86EMUL_UNHANDLEABLE allows the
higher layers of emulation to decide what to do instead.
Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
Cc: Keir Fraser <keir@xxxxxxx>
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
xen/arch/x86/hvm/emulate.c | 10 ++--------
xen/arch/x86/hvm/hvm.c | 16 ++++++----------
xen/include/asm-x86/hvm/hvm.h | 2 +-
3 files changed, 9 insertions(+), 19 deletions(-)
diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index 9d7af0c..b412302 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -121,12 +121,6 @@ static int hvmemul_do_io(
return X86EMUL_UNHANDLEABLE;
}
- if ( hvm_io_pending(curr) )
- {
- gdprintk(XENLOG_WARNING, "WARNING: io already pending?\n");
- return X86EMUL_UNHANDLEABLE;
- }
-
vio->io_state = (data_is_addr || dir == IOREQ_WRITE) ?
HVMIO_dispatched : HVMIO_awaiting_completion;
vio->io_size = size;
@@ -188,8 +182,8 @@ static int hvmemul_do_io(
}
else
{
- rc = X86EMUL_RETRY;
- if ( !hvm_send_assist_req(s, &p) )
+ rc = hvm_send_assist_req(s, &p);
+ if ( rc != X86EMUL_RETRY )
vio->io_state = HVMIO_none;
else if ( data_is_addr || dir == IOREQ_WRITE )
rc = X86EMUL_OKAY;
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index d5e5242..535d622 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -2605,7 +2605,7 @@ int hvm_buffered_io_send(ioreq_t *p)
return 1;
}
-bool_t hvm_send_assist_req(struct hvm_ioreq_server *s, ioreq_t *proto_p)
+int hvm_send_assist_req(struct hvm_ioreq_server *s, ioreq_t *proto_p)
{
struct vcpu *curr = current;
struct domain *d = curr->domain;
@@ -2613,7 +2613,7 @@ bool_t hvm_send_assist_req(struct hvm_ioreq_server *s, ioreq_t *proto_p)
ASSERT(s);
if ( unlikely(!vcpu_start_shutdown_deferral(curr)) )
- return 0; /* implicitly bins the i/o operation */
+ return X86EMUL_OKAY;
list_for_each_entry ( sv,
&s->ioreq_vcpu_list,
@@ -2628,14 +2628,14 @@ bool_t hvm_send_assist_req(struct hvm_ioreq_server *s, ioreq_t *proto_p)
{
gprintk(XENLOG_ERR, "device model set bad IO state %d\n",
p->state);
- goto crash;
+ break;
}
if ( unlikely(p->vp_eport != port) )
{
gprintk(XENLOG_ERR, "device model set bad event channel %d\n",
p->vp_eport);
- goto crash;
+ break;
}
proto_p->state = STATE_IOREQ_NONE;
@@ -2651,15 +2651,11 @@ bool_t hvm_send_assist_req(struct hvm_ioreq_server *s, ioreq_t *proto_p)
*/
p->state = STATE_IOREQ_READY;
notify_via_xen_event_channel(d, port);
- break;
+ return X86EMUL_RETRY;
}
}
- return 1;
-
- crash:
- domain_crash(d);
- return 0;
+ return X86EMUL_UNHANDLEABLE;
}
void hvm_complete_assist_req(ioreq_t *p)
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index 77eeac5..57f9605 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -230,7 +230,7 @@ void hvm_vcpu_reset_state(struct vcpu *v, uint16_t cs, uint16_t ip);
struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
ioreq_t *p);
-bool_t hvm_send_assist_req(struct hvm_ioreq_server *s, ioreq_t *p);
+int hvm_send_assist_req(struct hvm_ioreq_server *s, ioreq_t *p);
void hvm_broadcast_assist_req(ioreq_t *p);
void hvm_complete_assist_req(ioreq_t *p);
--
1.7.10.4
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel
Lists.xenproject.org is hosted with RackSpace, monitoring our servers 24x7x365 and backed by RackSpace's Fanatical Support®.