|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] Re: [RFC PATCH V1 04/12] xen/arm: Introduce arch specific bits for IOREQ/DM features
Hi Julien, I added logic to properly handle the return value of handle_hvm_io_completion(), as you had suggested. For testing purposes, I simulated handle_hvm_io_completion() returning false occasionally (I couldn't trigger a real "pending I/O" failure during testing) to see how the new logic behaved. I assume I can carry this solution over to the non-RFC series (?)
---
xen/arch/arm/traps.c | 36 ++++++++++++++++++++++--------------
xen/common/hvm/ioreq.c | 9 ++++++++-
xen/include/asm-arm/domain.h | 1 +
xen/include/xen/hvm/ioreq.h | 5 +++++
4 files changed, 36 insertions(+), 15 deletions(-)
diff --git a/xen/arch/arm/traps.c b/xen/arch/arm/traps.c
index 974c744..f74b514 100644
--- a/xen/arch/arm/traps.c
+++ b/xen/arch/arm/traps.c
@@ -2264,12 +2264,26 @@ static void check_for_pcpu_work(void)
* Process pending work for the vCPU. Any call should be fast or
* implement preemption.
*/
-static void check_for_vcpu_work(void)
+static bool check_for_vcpu_work(void)
{
struct vcpu *v = current;
+#ifdef CONFIG_IOREQ_SERVER
+ if ( hvm_domain_has_ioreq_server(v->domain) )
+ {
+ bool handled;
+
+ local_irq_enable();
+ handled = handle_hvm_io_completion(v);
+ local_irq_disable();
+
+ if ( !handled )
+ return true;
+ }
+#endif
+
if ( likely(!v->arch.need_flush_to_ram) )
- return;
+ return false;
/*
* Give a chance for the pCPU to process work before handling the vCPU
@@ -2280,6 +2294,8 @@ static void check_for_vcpu_work(void)
local_irq_enable();
p2m_flush_vm(v);
local_irq_disable();
+
+ return false;
}
/*
@@ -2290,20 +2306,12 @@ static void check_for_vcpu_work(void)
*/
void leave_hypervisor_to_guest(void)
{
-#ifdef CONFIG_IOREQ_SERVER
- /*
- * XXX: Check the return. Shall we call that in
- * continue_running and context_switch instead?
- * The benefits would be to avoid calling
- * handle_hvm_io_completion on every return.
- */
- local_irq_enable();
- handle_hvm_io_completion(current);
-#endif
local_irq_disable();
- check_for_vcpu_work();
- check_for_pcpu_work();
+ do
+ {
+ check_for_pcpu_work();
+ } while ( check_for_vcpu_work() );
vgic_sync_to_lrs();
diff --git a/xen/common/hvm/ioreq.c b/xen/common/hvm/ioreq.c
index 7e1fa23..81b41ab 100644
--- a/xen/common/hvm/ioreq.c
+++ b/xen/common/hvm/ioreq.c
@@ -38,9 +38,15 @@ static void set_ioreq_server(struct domain *d,
unsigned int id,
struct hvm_ioreq_server *s)
{
ASSERT(id < MAX_NR_IOREQ_SERVERS);
- ASSERT(!s || !d->arch.hvm.ioreq_server.server[id]);
+ ASSERT((!s && d->arch.hvm.ioreq_server.server[id]) ||
+ (s && !d->arch.hvm.ioreq_server.server[id]));
d->arch.hvm.ioreq_server.server[id] = s;
+
+ if ( s )
+ d->arch.hvm.ioreq_server.nr_servers ++;
+ else
+ d->arch.hvm.ioreq_server.nr_servers --;
}
/*
@@ -1415,6 +1421,7 @@ unsigned int hvm_broadcast_ioreq(ioreq_t *p, bool
buffered)
void hvm_ioreq_init(struct domain *d)
{
spin_lock_init(&d->arch.hvm.ioreq_server.lock);
+ d->arch.hvm.ioreq_server.nr_servers = 0;
arch_hvm_ioreq_init(d);
}
diff --git a/xen/include/asm-arm/domain.h b/xen/include/asm-arm/domain.h
index 6a01d69..484bd1a 100644
--- a/xen/include/asm-arm/domain.h
+++ b/xen/include/asm-arm/domain.h
@@ -68,6 +68,7 @@ struct hvm_domain
struct {
spinlock_t lock;
struct hvm_ioreq_server *server[MAX_NR_IOREQ_SERVERS];
+ unsigned int nr_servers;
} ioreq_server;
bool_t qemu_mapcache_invalidate;
diff --git a/xen/include/xen/hvm/ioreq.h b/xen/include/xen/hvm/ioreq.h
index 40b7b5e..8f78852 100644
--- a/xen/include/xen/hvm/ioreq.h
+++ b/xen/include/xen/hvm/ioreq.h
@@ -23,6 +23,11 @@
#include <asm/hvm/ioreq.h>
+static inline bool hvm_domain_has_ioreq_server(const struct domain *d)
+{
+ return (d->arch.hvm.ioreq_server.nr_servers > 0);
+}
+
#define GET_IOREQ_SERVER(d, id) \
(d)->arch.hvm.ioreq_server.server[id]
--
2.7.4
--
Regards,
Oleksandr Tyshchenko
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |