[PATCH v1 6/7] xen/mem_access: wrap memory access when VM_EVENT=n
The memory-access feature is based on the vm_event subsystem, which may be
disabled in the future. So a few switch cases in do_altp2m_op(), such as
HVMOP_altp2m_set_mem_access, need IS_ENABLED(CONFIG_VM_EVENT) wrapping to pass
compilation when ALTP2M=y and VM_EVENT=n (and hence MEM_ACCESS=n).
Function p2m_mem_access_check() also still needs a stub when VM_EVENT=n to
pass compilation; see the sketch below.
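As a self-contained illustration of the pattern (not part of the patch:
CONFIG_VM_EVENT is modelled by a plain 0/1 macro here, and the handler name is
a stand-in for the real Xen one):

    #include <stdio.h>

    #define CONFIG_VM_EVENT 0   /* model VM_EVENT=n; set to 1 for VM_EVENT=y */

    /* Stand-in for a handler that is only meaningful when VM_EVENT=y. */
    static int set_mem_access_stub(void)
    {
        return 0;
    }

    int main(void)
    {
        int rc = -1;

        /* Unlike #ifdef, the guarded body is always parsed and
         * type-checked; when the option is 0 the compiler removes the
         * branch as dead code, so call sites need no #ifdef of their own. */
        if ( CONFIG_VM_EVENT )
            rc = set_mem_access_stub();

        printf("rc=%d\n", rc);
        return 0;
    }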
Although the local variable "req_ptr" still remains NULL throughout its
lifetime, the change around its NULL assignment would trigger a runtime
undefined-behaviour error when CONFIG_UBSAN is on. So we strengthen the
condition check by adding vm_event_is_enabled() for this special case.
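A minimal sketch of the resulting guard (simplified stand-ins: the real
vm_event_is_enabled() and monitor_traps() take a vcpu argument):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct vm_event_st;   /* opaque, as at the call site */

    /* With VM_EVENT=n this predicate is constant false, making the whole
     * dispatch below provably unreachable. */
    static bool vm_event_is_enabled(void)
    {
        return false;
    }

    static int monitor_traps(bool sync, struct vm_event_st *req)
    {
        (void)sync;
        (void)req;
        return 0;
    }

    static int page_fault_tail(struct vm_event_st *req_ptr, bool sync)
    {
        int rc = 1;

        /* Test the runtime predicate in addition to the pointer, so the
         * path is dead in VM_EVENT=n builds regardless of how UBSAN
         * instruments the stub's NULL assignment. */
        if ( req_ptr != NULL && vm_event_is_enabled() )
        {
            if ( monitor_traps(sync, req_ptr) < 0 )
                rc = 0;
        }

        return rc;
    }

    int main(void)
    {
        printf("rc=%d\n", page_fault_tail(NULL, false));
        return 0;
    }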
Signed-off-by: Penny Zheng <Penny.Zheng@xxxxxxx>
---
v3 -> v4:
- a new commit split from previous "xen/vm_event: consolidate CONFIG_VM_EVENT"
- use IS_ENABLED() to replace #ifdef
- remove unnecessary changes in compat_altp2m_op()
- strengthen the condition check to fix a runtime undefined-behaviour error
  when CONFIG_UBSAN=y
---
xen/arch/x86/hvm/hvm.c | 97 +++++++++++++++------------
xen/arch/x86/include/asm/mem_access.h | 10 +++
2 files changed, 65 insertions(+), 42 deletions(-)
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index ebadcf2289..c3417b0593 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -52,6 +52,7 @@
#include <asm/i387.h>
#include <asm/mc146818rtc.h>
#include <asm/mce.h>
+#include <asm/mem_access.h>
#include <asm/monitor.h>
#include <asm/msr.h>
#include <asm/mtrr.h>
@@ -2081,7 +2082,7 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
#endif
}
- if ( req_ptr )
+ if ( req_ptr && vm_event_is_enabled(curr) )
{
if ( monitor_traps(curr, sync, req_ptr) < 0 )
rc = 0;
@@ -4862,58 +4863,70 @@ static int do_altp2m_op(
break;
case HVMOP_altp2m_set_mem_access:
- if ( a.u.mem_access.pad )
- rc = -EINVAL;
- else
- rc = p2m_set_mem_access(d, _gfn(a.u.mem_access.gfn), 1, 0, 0,
- a.u.mem_access.access,
- a.u.mem_access.view);
+ if ( IS_ENABLED(CONFIG_VM_EVENT) )
+ {
+ if ( a.u.mem_access.pad )
+ rc = -EINVAL;
+ else
+ rc = p2m_set_mem_access(d, _gfn(a.u.mem_access.gfn), 1, 0, 0,
+ a.u.mem_access.access,
+ a.u.mem_access.view);
+ }
break;
case HVMOP_altp2m_set_mem_access_multi:
- if ( a.u.set_mem_access_multi.pad ||
- a.u.set_mem_access_multi.opaque > a.u.set_mem_access_multi.nr )
+ if ( IS_ENABLED(CONFIG_VM_EVENT) )
{
- rc = -EINVAL;
- break;
- }
+ if ( a.u.set_mem_access_multi.pad ||
+ a.u.set_mem_access_multi.opaque >
+ a.u.set_mem_access_multi.nr )
+ {
+ rc = -EINVAL;
+ break;
+ }
- /*
- * Unlike XENMEM_access_op_set_access_multi, we don't need any bits of
- * the 'continuation' counter to be zero (to stash a command in).
- * However, 0x40 is a good 'stride' to make sure that we make
- * a reasonable amount of forward progress before yielding,
- * so use a mask of 0x3F here.
- */
- rc = p2m_set_mem_access_multi(d, a.u.set_mem_access_multi.pfn_list,
- a.u.set_mem_access_multi.access_list,
- a.u.set_mem_access_multi.nr,
- a.u.set_mem_access_multi.opaque,
- 0x3F,
- a.u.set_mem_access_multi.view);
- if ( rc > 0 )
- {
- a.u.set_mem_access_multi.opaque = rc;
- rc = -ERESTART;
- if ( __copy_field_to_guest(guest_handle_cast(arg, xen_hvm_altp2m_op_t),
- &a, u.set_mem_access_multi.opaque) )
- rc = -EFAULT;
+ /*
+ * Unlike XENMEM_access_op_set_access_multi, we don't need any
+ * bits of the 'continuation' counter to be zero (to stash a
+ * command in).
+ * However, 0x40 is a good 'stride' to make sure that we make
+ * a reasonable amount of forward progress before yielding,
+ * so use a mask of 0x3F here.
+ */
+ rc = p2m_set_mem_access_multi(d, a.u.set_mem_access_multi.pfn_list,
+ a.u.set_mem_access_multi.access_list,
+ a.u.set_mem_access_multi.nr,
+ a.u.set_mem_access_multi.opaque,
+ 0x3F,
+ a.u.set_mem_access_multi.view);
+ if ( rc > 0 )
+ {
+ a.u.set_mem_access_multi.opaque = rc;
+ rc = -ERESTART;
+ if ( __copy_field_to_guest(
+ guest_handle_cast(arg, xen_hvm_altp2m_op_t),
+ &a, u.set_mem_access_multi.opaque) )
+ rc = -EFAULT;
+ }
}
break;
case HVMOP_altp2m_get_mem_access:
- if ( a.u.mem_access.pad )
- rc = -EINVAL;
- else
+ if ( IS_ENABLED(CONFIG_VM_EVENT) )
{
- xenmem_access_t access;
-
- rc = p2m_get_mem_access(d, _gfn(a.u.mem_access.gfn), &access,
- a.u.mem_access.view);
- if ( !rc )
+ if ( a.u.mem_access.pad )
+ rc = -EINVAL;
+ else
{
- a.u.mem_access.access = access;
- rc = __copy_to_guest(arg, &a, 1) ? -EFAULT : 0;
+ xenmem_access_t access;
+
+ rc = p2m_get_mem_access(d, _gfn(a.u.mem_access.gfn), &access,
+ a.u.mem_access.view);
+ if ( !rc )
+ {
+ a.u.mem_access.access = access;
+ rc = __copy_to_guest(arg, &a, 1) ? -EFAULT : 0;
+ }
}
}
break;
diff --git a/xen/arch/x86/include/asm/mem_access.h b/xen/arch/x86/include/asm/mem_access.h
index 257ed33de1..790bed81e8 100644
--- a/xen/arch/x86/include/asm/mem_access.h
+++ b/xen/arch/x86/include/asm/mem_access.h
@@ -14,6 +14,7 @@
#ifndef __ASM_X86_MEM_ACCESS_H__
#define __ASM_X86_MEM_ACCESS_H__
+#ifdef CONFIG_VM_EVENT
/*
* Setup vm_event request based on the access (gla is -1ull if not available).
* Handles the rw2rx conversion. Boolean return value indicates if event type
@@ -25,6 +26,15 @@
bool p2m_mem_access_check(paddr_t gpa, unsigned long gla,
struct npfec npfec,
struct vm_event_st **req_ptr);
+#else
+static inline bool p2m_mem_access_check(paddr_t gpa, unsigned long gla,
+ struct npfec npfec,
+ struct vm_event_st **req_ptr)
+{
+ *req_ptr = NULL;
+ return false;
+}
+#endif /* CONFIG_VM_EVENT */
/* Check for emulation and mark vcpu for skipping one instruction
* upon rescheduling if required. */
--
2.34.1