[xen staging] x86: mem-access is HVM-only



commit 99192701e34e2e82276289efe4fd450fb91a703b
Author:     Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Mon Apr 19 15:28:00 2021 +0200
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Mon Apr 19 15:28:00 2021 +0200

    x86: mem-access is HVM-only
    
    By excluding the file from being built for !HVM, #ifdef-ary can be
    removed from it.
    
    The new HVM dependency on the Kconfig option is benign for Arm.
    
    Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
    Acked-by: Tamas K Lengyel <tamas@xxxxxxxxxxxxx>
    Reviewed-by: Alexandru Isaila <aisaila@xxxxxxxxxxxxxxx>
---
 xen/arch/x86/Kconfig         |  2 +-
 xen/arch/x86/mm/mem_access.c | 20 --------------------
 xen/arch/x86/vm_event.c      |  2 ++
 xen/common/Kconfig           |  1 +
 4 files changed, 4 insertions(+), 21 deletions(-)
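
For readers following the Kconfig plumbing below: x86 no longer selects
MEM_ACCESS_ALWAYS_ON unconditionally; HVM selects it instead, and the common
MEM_ACCESS option gains "depends on HVM".  An x86 configuration with
MEM_ACCESS enabled therefore always has HVM enabled as well, and (as the
commit message notes) mem_access.c is no longer built for !HVM, which is what
makes its CONFIG_HVM #ifdef-ary removable.  The snippet here is an
illustration of that resulting invariant, not part of the patch; think of it
as a sketch that could sit in any x86 file built only for mem-access
configurations.

    /*
     * Illustration only (not from the patch): with MEM_ACCESS depending on
     * HVM, an x86 translation unit built only when CONFIG_MEM_ACCESS is
     * enabled may also assume CONFIG_HVM -- the assumption that lets the
     * #ifdef CONFIG_HVM guards be dropped from mem_access.c in the hunks
     * below.
     */
    #if defined(CONFIG_MEM_ACCESS) && !defined(CONFIG_HVM)
    # error "CONFIG_MEM_ACCESS now implies CONFIG_HVM on x86"
    #endif

Note that the vm_event.c hunks go the opposite way, wrapping the mem-access
response handling in a CONFIG_HVM guard rather than removing one, presumably
because that file is still built for !HVM configurations.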

diff --git a/xen/arch/x86/Kconfig b/xen/arch/x86/Kconfig
index 57776d5106..db0de0a9ec 100644
--- a/xen/arch/x86/Kconfig
+++ b/xen/arch/x86/Kconfig
@@ -16,7 +16,6 @@ config X86
        select HAS_FAST_MULTIPLY
        select HAS_IOPORTS
        select HAS_KEXEC
-       select MEM_ACCESS_ALWAYS_ON
        select HAS_MEM_PAGING
        select HAS_NS16550
        select HAS_PASSTHROUGH
@@ -95,6 +94,7 @@ config HVM
        def_bool !PV_SHIM_EXCLUSIVE
        select COMPAT
        select IOREQ_SERVER
+       select MEM_ACCESS_ALWAYS_ON
        prompt "HVM support"
        ---help---
          Interfaces to support HVM domains.  HVM domains require hardware
diff --git a/xen/arch/x86/mm/mem_access.c b/xen/arch/x86/mm/mem_access.c
index ede774fb50..7750b81cd4 100644
--- a/xen/arch/x86/mm/mem_access.c
+++ b/xen/arch/x86/mm/mem_access.c
@@ -139,7 +139,6 @@ bool p2m_mem_access_emulate_check(struct vcpu *v,
     return violation;
 }
 
-#ifdef CONFIG_HVM
 bool p2m_mem_access_check(paddr_t gpa, unsigned long gla,
                           struct npfec npfec,
                           vm_event_request_t **req_ptr)
@@ -282,7 +281,6 @@ int p2m_set_altp2m_mem_access(struct domain *d, struct p2m_domain *hp2m,
      */
     return ap2m->set_entry(ap2m, gfn, mfn, PAGE_ORDER_4K, t, a, -1);
 }
-#endif
 
 static int set_mem_access(struct domain *d, struct p2m_domain *p2m,
                           struct p2m_domain *ap2m, p2m_access_t a,
@@ -290,7 +288,6 @@ static int set_mem_access(struct domain *d, struct p2m_domain *p2m,
 {
     int rc = 0;
 
-#ifdef CONFIG_HVM
     if ( ap2m )
     {
         rc = p2m_set_altp2m_mem_access(d, p2m, ap2m, a, gfn);
@@ -299,9 +296,6 @@ static int set_mem_access(struct domain *d, struct p2m_domain *p2m,
             rc = 0;
     }
     else
-#else
-    ASSERT(!ap2m);
-#endif
     {
         p2m_access_t _a;
         p2m_type_t t;
@@ -362,7 +356,6 @@ long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr,
     long rc = 0;
 
     /* altp2m view 0 is treated as the hostp2m */
-#ifdef CONFIG_HVM
     if ( altp2m_idx )
     {
         if ( altp2m_idx >= min(ARRAY_SIZE(d->arch.altp2m_p2m), MAX_EPTP) ||
@@ -372,9 +365,6 @@ long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr,
 
         ap2m = array_access_nospec(d->arch.altp2m_p2m, altp2m_idx);
     }
-#else
-    ASSERT(!altp2m_idx);
-#endif
 
     if ( !xenmem_access_to_p2m_access(p2m, access, &a) )
         return -EINVAL;
@@ -422,7 +412,6 @@ long p2m_set_mem_access_multi(struct domain *d,
     long rc = 0;
 
     /* altp2m view 0 is treated as the hostp2m */
-#ifdef CONFIG_HVM
     if ( altp2m_idx )
     {
         if ( altp2m_idx >= min(ARRAY_SIZE(d->arch.altp2m_p2m), MAX_EPTP) ||
@@ -432,9 +421,6 @@ long p2m_set_mem_access_multi(struct domain *d,
 
         ap2m = array_access_nospec(d->arch.altp2m_p2m, altp2m_idx);
     }
-#else
-    ASSERT(!altp2m_idx);
-#endif
 
     p2m_lock(p2m);
     if ( ap2m )
@@ -484,7 +470,6 @@ int p2m_get_mem_access(struct domain *d, gfn_t gfn, xenmem_access_t *access,
 {
     struct p2m_domain *p2m = p2m_get_hostp2m(d);
 
-#ifdef CONFIG_HVM
     if ( !altp2m_active(d) )
     {
         if ( altp2m_idx )
@@ -499,9 +484,6 @@ int p2m_get_mem_access(struct domain *d, gfn_t gfn, xenmem_access_t *access,
 
         p2m = array_access_nospec(d->arch.altp2m_p2m, altp2m_idx);
     }
-#else
-    ASSERT(!altp2m_idx);
-#endif
 
     return _p2m_get_mem_access(p2m, gfn, access);
 }
@@ -512,7 +494,6 @@ void arch_p2m_set_access_required(struct domain *d, bool access_required)
 
     p2m_get_hostp2m(d)->access_required = access_required;
 
-#ifdef CONFIG_HVM
     if ( altp2m_active(d) )
     {
         unsigned int i;
@@ -524,7 +505,6 @@ void arch_p2m_set_access_required(struct domain *d, bool access_required)
                 p2m->access_required = access_required;
         }
     }
-#endif
 }
 
 bool p2m_mem_access_sanity_check(const struct domain *d)
diff --git a/xen/arch/x86/vm_event.c b/xen/arch/x86/vm_event.c
index 8f73a73e2e..ffcc64c40b 100644
--- a/xen/arch/x86/vm_event.c
+++ b/xen/arch/x86/vm_event.c
@@ -265,6 +265,7 @@ void vm_event_emulate_check(struct vcpu *v, vm_event_response_t *rsp)
         return;
     }
 
+#ifdef CONFIG_HVM
     switch ( rsp->reason )
     {
     case VM_EVENT_REASON_MEM_ACCESS:
@@ -298,6 +299,7 @@ void vm_event_emulate_check(struct vcpu *v, vm_event_response_t *rsp)
     default:
         break;
     };
+#endif
 }
 
 void vm_event_reset_vmtrace(struct vcpu *v)
diff --git a/xen/common/Kconfig b/xen/common/Kconfig
index 3064bf6b89..b6c9827a7b 100644
--- a/xen/common/Kconfig
+++ b/xen/common/Kconfig
@@ -61,6 +61,7 @@ config MEM_ACCESS_ALWAYS_ON
 config MEM_ACCESS
        def_bool MEM_ACCESS_ALWAYS_ON
        prompt "Memory Access and VM events" if !MEM_ACCESS_ALWAYS_ON
+       depends on HVM
        ---help---
 
          Framework to configure memory access types for guests and receive
--
generated by git-patchbot for /home/xen/git/xen.git#staging