[XEN PATCH v4 12/14] x86/ioreq: guard VIO_realmode_completion with CONFIG_VMX
From: Xenia Ragiadakou <burzalodowa@xxxxxxxxx>
VIO_realmode_completion is specific to VMX real mode, so the function
arch_vcpu_ioreq_completion() contains actual handling code only in VMX-enabled
builds; in all other x86 and Arm build configurations it is effectively a stub.
These stubs can be avoided by moving the VIO_realmode_completion handling code
under CONFIG_VMX, after which the Arm stub can be removed entirely.
Signed-off-by: Xenia Ragiadakou <burzalodowa@xxxxxxxxx>
Signed-off-by: Sergiy Kibrik <Sergiy_Kibrik@xxxxxxxx>
CC: Julien Grall <julien@xxxxxxx>
CC: Jan Beulich <jbeulich@xxxxxxxx>
---
changes in v4:
- move whole arch_vcpu_ioreq_completion() under CONFIG_VMX and remove
ARM's variant of this handler, as Julien suggested
changes in v1:
- put VIO_realmode_completion enum under #ifdef CONFIG_VMX
---
xen/arch/arm/ioreq.c | 6 ------
xen/arch/x86/hvm/ioreq.c | 2 ++
xen/arch/x86/include/asm/hvm/ioreq.h | 5 +++++
xen/common/ioreq.c | 5 ++++-
xen/include/xen/ioreq.h | 1 -
5 files changed, 11 insertions(+), 8 deletions(-)
diff --git a/xen/arch/arm/ioreq.c b/xen/arch/arm/ioreq.c
index 5df755b48b..2e829d2e7f 100644
--- a/xen/arch/arm/ioreq.c
+++ b/xen/arch/arm/ioreq.c
@@ -135,12 +135,6 @@ bool arch_ioreq_complete_mmio(void)
return false;
}
-bool arch_vcpu_ioreq_completion(enum vio_completion completion)
-{
- ASSERT_UNREACHABLE();
- return true;
-}
-
/*
* The "legacy" mechanism of mapping magic pages for the IOREQ servers
* is x86 specific, so the following hooks don't need to be implemented on Arm:
diff --git a/xen/arch/x86/hvm/ioreq.c b/xen/arch/x86/hvm/ioreq.c
index 4eb7a70182..0406630dc8 100644
--- a/xen/arch/x86/hvm/ioreq.c
+++ b/xen/arch/x86/hvm/ioreq.c
@@ -29,6 +29,7 @@ bool arch_ioreq_complete_mmio(void)
return handle_mmio();
}
+#ifdef CONFIG_VMX
bool arch_vcpu_ioreq_completion(enum vio_completion completion)
{
switch ( completion )
@@ -51,6 +52,7 @@ bool arch_vcpu_ioreq_completion(enum vio_completion completion)
return true;
}
+#endif
static gfn_t hvm_alloc_legacy_ioreq_gfn(struct ioreq_server *s)
{
diff --git a/xen/arch/x86/include/asm/hvm/ioreq.h b/xen/arch/x86/include/asm/hvm/ioreq.h
index 84be14fd08..c5f16a1e4a 100644
--- a/xen/arch/x86/include/asm/hvm/ioreq.h
+++ b/xen/arch/x86/include/asm/hvm/ioreq.h
@@ -13,6 +13,11 @@
#define IOREQ_STATUS_UNHANDLED X86EMUL_UNHANDLEABLE
#define IOREQ_STATUS_RETRY X86EMUL_RETRY
+#ifdef CONFIG_VMX
+bool arch_vcpu_ioreq_completion(enum vio_completion completion);
+#define arch_vcpu_ioreq_completion arch_vcpu_ioreq_completion
+#endif
+
#endif /* __ASM_X86_HVM_IOREQ_H__ */
/*
diff --git a/xen/common/ioreq.c b/xen/common/ioreq.c
index 1257a3d972..10fe932a7e 100644
--- a/xen/common/ioreq.c
+++ b/xen/common/ioreq.c
@@ -242,9 +242,12 @@ bool vcpu_ioreq_handle_completion(struct vcpu *v)
res = handle_pio(vio->req.addr, vio->req.size,
vio->req.dir);
break;
-
default:
+#ifdef arch_vcpu_ioreq_completion
res = arch_vcpu_ioreq_completion(completion);
+#else
+ ASSERT_UNREACHABLE();
+#endif
break;
}
diff --git a/xen/include/xen/ioreq.h b/xen/include/xen/ioreq.h
index cd399adf17..22fb9ba7b0 100644
--- a/xen/include/xen/ioreq.h
+++ b/xen/include/xen/ioreq.h
@@ -111,7 +111,6 @@ void ioreq_domain_init(struct domain *d);
int ioreq_server_dm_op(struct xen_dm_op *op, struct domain *d, bool *const_op);
bool arch_ioreq_complete_mmio(void);
-bool arch_vcpu_ioreq_completion(enum vio_completion completion);
int arch_ioreq_server_map_pages(struct ioreq_server *s);
void arch_ioreq_server_unmap_pages(struct ioreq_server *s);
void arch_ioreq_server_enable(struct ioreq_server *s);
--
2.25.1