[xen staging] x86/hvm: Rework nested hap functions to reduce parameters



commit ddf820e1d6e82051be95eeb30a570426423e4f9f
Author:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
AuthorDate: Tue Nov 30 17:05:09 2021 +0000
Commit:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CommitDate: Thu Jan 13 15:19:43 2022 +0000

    x86/hvm: Rework nested hap functions to reduce parameters
    
    Most functions in this call chain have 8 parameters, meaning that the final
    two booleans are spilled to the stack for calls.
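    
    As a point of reference (an illustration, not part of the patch itself): the
    x86-64 SysV ABI passes only the first six integer/pointer arguments in
    registers, so the pre-patch prototype below forces its last two booleans
    onto the stack at every call site.
    
        int nestedhap_walk_L1_p2m(struct vcpu *v,            /* rdi   */
                                  paddr_t L2_gpa,            /* rsi   */
                                  paddr_t *L1_gpa,           /* rdx   */
                                  unsigned int *page_order,  /* rcx   */
                                  uint8_t *p2m_acc,          /* r8    */
                                  bool_t access_r,           /* r9    */
                                  bool_t access_w,           /* stack */
                                  bool_t access_x);          /* stack */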
    
    First, delete nestedhap_walk_L1_p2m and introduce nhvm_hap_walk_L1_p2m() as a
    thin wrapper around hvm_funcs, just like all the other nhvm_*() hooks.  This
    involves including xen/mm.h as the forward declaration of struct npfec is no
    longer enough.
    
    Next, replace the triple of booleans with struct npfec, which contains the
    same information in the bottom 3 bits.
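    
    A minimal sketch of the relevant part of struct npfec (the full definition,
    reachable via xen/mm.h as noted above, carries further fields beyond these);
    the three single-bit access fields are what replace the separate
    access_r/access_w/access_x booleans throughout this series:
    
        struct npfec {
            unsigned int read_access:1;   /* bit 0 - was access_r */
            unsigned int write_access:1;  /* bit 1 - was access_w */
            unsigned int insn_fetch:1;    /* bit 2 - was access_x */
            /* ... remaining fields elided ... */
        };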
    
    No functional change.
    
    Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
---
 xen/arch/x86/hvm/hvm.c                       |  5 +---
 xen/arch/x86/hvm/svm/nestedsvm.c             | 11 ++++----
 xen/arch/x86/hvm/vmx/vvmx.c                  | 10 +++----
 xen/arch/x86/include/asm/hvm/hvm.h           | 14 +++++++---
 xen/arch/x86/include/asm/hvm/nestedhvm.h     |  6 +----
 xen/arch/x86/include/asm/hvm/svm/nestedsvm.h |  6 ++---
 xen/arch/x86/include/asm/hvm/vmx/vvmx.h      |  8 +++---
 xen/arch/x86/mm/hap/nested_hap.c             | 40 +++++++---------------------
 xen/arch/x86/mm/p2m.c                        | 12 +++++----
 9 files changed, 47 insertions(+), 65 deletions(-)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index d233550ae4..b0ee49a61a 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -1775,10 +1775,7 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
          * the same as for shadow paging.
          */
 
-         rv = nestedhvm_hap_nested_page_fault(curr, &gpa,
-                                              npfec.read_access,
-                                              npfec.write_access,
-                                              npfec.insn_fetch);
+        rv = nestedhvm_hap_nested_page_fault(curr, &gpa, npfec);
         switch (rv) {
         case NESTEDHVM_PAGEFAULT_DONE:
         case NESTEDHVM_PAGEFAULT_RETRY:
diff --git a/xen/arch/x86/hvm/svm/nestedsvm.c b/xen/arch/x86/hvm/svm/nestedsvm.c
index 6d90630040..abc178d8d4 100644
--- a/xen/arch/x86/hvm/svm/nestedsvm.c
+++ b/xen/arch/x86/hvm/svm/nestedsvm.c
@@ -1216,10 +1216,9 @@ nsvm_vmcb_hap_enabled(struct vcpu *v)
  * walk is successful, the translated value is returned in
  * L1_gpa. The result value tells what to do next.
  */
-int
-nsvm_hap_walk_L1_p2m(struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa,
-                     unsigned int *page_order, uint8_t *p2m_acc,
-                     bool_t access_r, bool_t access_w, bool_t access_x)
+int nsvm_hap_walk_L1_p2m(
+    struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa, unsigned int *page_order,
+    uint8_t *p2m_acc, struct npfec npfec)
 {
     uint32_t pfec;
     unsigned long nested_cr3, gfn;
@@ -1227,9 +1226,9 @@ nsvm_hap_walk_L1_p2m(struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa,
     nested_cr3 = nhvm_vcpu_p2m_base(v);
 
     pfec = PFEC_user_mode | PFEC_page_present;
-    if ( access_w )
+    if ( npfec.write_access )
         pfec |= PFEC_write_access;
-    if ( access_x )
+    if ( npfec.insn_fetch )
         pfec |= PFEC_insn_fetch;
 
     /* Walk the guest-supplied NPT table, just as if it were a pagetable */
diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
index e9f94daf64..7419ee9dd0 100644
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -2346,16 +2346,16 @@ int nvmx_msr_read_intercept(unsigned int msr, u64 *msr_content)
  * walk is successful, the translated value is returned in
  * L1_gpa. The result value tells what to do next.
  */
-int
-nvmx_hap_walk_L1_p2m(struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa,
-                     unsigned int *page_order, uint8_t *p2m_acc,
-                     bool_t access_r, bool_t access_w, bool_t access_x)
+int nvmx_hap_walk_L1_p2m(
+    struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa, unsigned int *page_order,
+    uint8_t *p2m_acc, struct npfec npfec)
 {
     int rc;
     unsigned long gfn;
     uint64_t exit_qual;
     uint32_t exit_reason = EXIT_REASON_EPT_VIOLATION;
-    uint32_t rwx_rights = (access_x << 2) | (access_w << 1) | access_r;
+    uint32_t rwx_rights =
+        (npfec.insn_fetch << 2) | (npfec.write_access << 1) | npfec.read_access;
     struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
 
     vmx_vmcs_enter(v);
diff --git a/xen/arch/x86/include/asm/hvm/hvm.h b/xen/arch/x86/include/asm/hvm/hvm.h
index 2767427aa9..cd9e73e266 100644
--- a/xen/arch/x86/include/asm/hvm/hvm.h
+++ b/xen/arch/x86/include/asm/hvm/hvm.h
@@ -20,6 +20,8 @@
 #ifndef __ASM_X86_HVM_HVM_H__
 #define __ASM_X86_HVM_HVM_H__
 
+#include <xen/mm.h>
+
 #include <asm/alternative.h>
 #include <asm/asm_defns.h>
 #include <asm/current.h>
@@ -203,8 +205,7 @@ struct hvm_function_table {
     /*Walk nested p2m  */
     int (*nhvm_hap_walk_L1_p2m)(struct vcpu *v, paddr_t L2_gpa,
                                 paddr_t *L1_gpa, unsigned int *page_order,
-                                uint8_t *p2m_acc, bool_t access_r,
-                                bool_t access_w, bool_t access_x);
+                                uint8_t *p2m_acc, struct npfec npfec);
 
     void (*enable_msr_interception)(struct domain *d, uint32_t msr);
     bool_t (*is_singlestep_supported)(void);
@@ -350,7 +351,6 @@ int hvm_debug_op(struct vcpu *v, int32_t op);
 void hvm_toggle_singlestep(struct vcpu *v);
 void hvm_fast_singlestep(struct vcpu *v, uint16_t p2midx);
 
-struct npfec;
 int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
                               struct npfec npfec);
 
@@ -631,6 +631,14 @@ static inline enum hvm_intblk nhvm_interrupt_blocked(struct vcpu *v)
     return hvm_funcs.nhvm_intr_blocked(v);
 }
 
+static inline int nhvm_hap_walk_L1_p2m(
+    struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa, unsigned int *page_order,
+    uint8_t *p2m_acc, struct npfec npfec)
+{
+    return hvm_funcs.nhvm_hap_walk_L1_p2m(
+        v, L2_gpa, L1_gpa, page_order, p2m_acc, npfec);
+}
+
 static inline void hvm_enable_msr_interception(struct domain *d, uint32_t msr)
 {
     hvm_funcs.enable_msr_interception(d, msr);
diff --git a/xen/arch/x86/include/asm/hvm/nestedhvm.h b/xen/arch/x86/include/asm/hvm/nestedhvm.h
index d263925786..7184928c2b 100644
--- a/xen/arch/x86/include/asm/hvm/nestedhvm.h
+++ b/xen/arch/x86/include/asm/hvm/nestedhvm.h
@@ -58,11 +58,7 @@ bool_t nestedhvm_vcpu_in_guestmode(struct vcpu *v);
 #define NESTEDHVM_PAGEFAULT_RETRY      5
 #define NESTEDHVM_PAGEFAULT_DIRECT_MMIO 6
 int nestedhvm_hap_nested_page_fault(struct vcpu *v, paddr_t *L2_gpa,
-    bool_t access_r, bool_t access_w, bool_t access_x);
-
-int nestedhap_walk_L1_p2m(struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa,
-                          unsigned int *page_order, uint8_t *p2m_acc,
-                          bool_t access_r, bool_t access_w, bool_t access_x);
+                                    struct npfec npfec);
 
 /* IO permission map */
 unsigned long *nestedhvm_vcpu_iomap_get(bool_t ioport_80, bool_t ioport_ed);
diff --git a/xen/arch/x86/include/asm/hvm/svm/nestedsvm.h b/xen/arch/x86/include/asm/hvm/svm/nestedsvm.h
index 0873698457..c3ef235414 100644
--- a/xen/arch/x86/include/asm/hvm/svm/nestedsvm.h
+++ b/xen/arch/x86/include/asm/hvm/svm/nestedsvm.h
@@ -122,9 +122,9 @@ enum hvm_intblk nsvm_intr_blocked(struct vcpu *v);
 void svm_vmexit_do_clgi(struct cpu_user_regs *regs, struct vcpu *v);
 void svm_vmexit_do_stgi(struct cpu_user_regs *regs, struct vcpu *v);
 bool_t nestedsvm_gif_isset(struct vcpu *v);
-int nsvm_hap_walk_L1_p2m(struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa,
-                         unsigned int *page_order, uint8_t *p2m_acc,
-                         bool_t access_r, bool_t access_w, bool_t access_x);
+int nsvm_hap_walk_L1_p2m(
+    struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa, unsigned int *page_order,
+    uint8_t *p2m_acc, struct npfec npfec);
 
 #define NSVM_INTR_NOTHANDLED     3
 #define NSVM_INTR_NOTINTERCEPTED 2
diff --git a/xen/arch/x86/include/asm/hvm/vmx/vvmx.h b/xen/arch/x86/include/asm/hvm/vmx/vvmx.h
index d5f68f30b1..e4ca3bc6ee 100644
--- a/xen/arch/x86/include/asm/hvm/vmx/vvmx.h
+++ b/xen/arch/x86/include/asm/hvm/vmx/vvmx.h
@@ -100,10 +100,10 @@ bool_t nvmx_ept_enabled(struct vcpu *v);
 #define EPT_TRANSLATE_MISCONFIG     2
 #define EPT_TRANSLATE_RETRY         3
 
-int
-nvmx_hap_walk_L1_p2m(struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa,
-                     unsigned int *page_order, uint8_t *p2m_acc,
-                     bool_t access_r, bool_t access_w, bool_t access_x);
+int nvmx_hap_walk_L1_p2m(
+    struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa, unsigned int *page_order,
+    uint8_t *p2m_acc, struct npfec npfec);
+
 /*
  * Virtual VMCS layout
  *
diff --git a/xen/arch/x86/mm/hap/nested_hap.c b/xen/arch/x86/mm/hap/nested_hap.c
index 50fa2dd9f4..d8a7b3b401 100644
--- a/xen/arch/x86/mm/hap/nested_hap.c
+++ b/xen/arch/x86/mm/hap/nested_hap.c
@@ -113,31 +113,13 @@ nestedhap_fix_p2m(struct vcpu *v, struct p2m_domain *p2m,
     }
 }
 
-/* This function uses L2_gpa to walk the P2M page table in L1. If the
- * walk is successful, the translated value is returned in
- * L1_gpa. The result value tells what to do next.
- */
-int
-nestedhap_walk_L1_p2m(struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa,
-                      unsigned int *page_order, uint8_t *p2m_acc,
-                      bool_t access_r, bool_t access_w, bool_t access_x)
-{
-    ASSERT(hvm_funcs.nhvm_hap_walk_L1_p2m);
-
-    return hvm_funcs.nhvm_hap_walk_L1_p2m(v, L2_gpa, L1_gpa, page_order,
-        p2m_acc, access_r, access_w, access_x);
-}
-
-
 /* This function uses L1_gpa to walk the P2M table in L0 hypervisor. If the
  * walk is successful, the translated value is returned in L0_gpa. The return 
  * value tells the upper level what to do.
  */
-static int
-nestedhap_walk_L0_p2m(struct p2m_domain *p2m, paddr_t L1_gpa, paddr_t *L0_gpa,
-                      p2m_type_t *p2mt, p2m_access_t *p2ma,
-                      unsigned int *page_order,
-                      bool_t access_r, bool_t access_w, bool_t access_x)
+static int nestedhap_walk_L0_p2m(
+    struct p2m_domain *p2m, paddr_t L1_gpa, paddr_t *L0_gpa, p2m_type_t *p2mt,
+    p2m_access_t *p2ma, unsigned int *page_order, struct npfec npfec)
 {
     mfn_t mfn;
     int rc;
@@ -154,7 +136,7 @@ nestedhap_walk_L0_p2m(struct p2m_domain *p2m, paddr_t L1_gpa, paddr_t *L0_gpa,
         goto out;
 
     rc = NESTEDHVM_PAGEFAULT_L0_ERROR;
-    if ( access_w && p2m_is_readonly(*p2mt) )
+    if ( npfec.write_access && p2m_is_readonly(*p2mt) )
         goto out;
 
     if ( p2m_is_paging(*p2mt) || p2m_is_shared(*p2mt) || !p2m_is_ram(*p2mt) )
@@ -176,9 +158,8 @@ out:
  *
  * Returns:
  */
-int
-nestedhvm_hap_nested_page_fault(struct vcpu *v, paddr_t *L2_gpa,
-    bool_t access_r, bool_t access_w, bool_t access_x)
+int nestedhvm_hap_nested_page_fault(
+    struct vcpu *v, paddr_t *L2_gpa, struct npfec npfec)
 {
     int rv;
     paddr_t L1_gpa, L0_gpa;
@@ -192,8 +173,8 @@ nestedhvm_hap_nested_page_fault(struct vcpu *v, paddr_t *L2_gpa,
     p2m = p2m_get_hostp2m(d); /* L0 p2m */
 
     /* walk the L1 P2M table */
-    rv = nestedhap_walk_L1_p2m(v, *L2_gpa, &L1_gpa, &page_order_21, &p2ma_21,
-        access_r, access_w, access_x);
+    rv = nhvm_hap_walk_L1_p2m(v, *L2_gpa, &L1_gpa, &page_order_21, &p2ma_21,
+                              npfec);
 
     /* let caller to handle these two cases */
     switch (rv) {
@@ -209,9 +190,8 @@ nestedhvm_hap_nested_page_fault(struct vcpu *v, paddr_t *L2_gpa,
     }
 
     /* ==> we have to walk L0 P2M */
-    rv = nestedhap_walk_L0_p2m(p2m, L1_gpa, &L0_gpa,
-        &p2mt_10, &p2ma_10, &page_order_10,
-        access_r, access_w, access_x);
+    rv = nestedhap_walk_L0_p2m(p2m, L1_gpa, &L0_gpa, &p2mt_10, &p2ma_10,
+                               &page_order_10, npfec);
 
     /* let upper level caller to handle these two cases */
     switch (rv) {
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 98c8201f92..3f81e99fd6 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -1930,6 +1930,11 @@ unsigned long paging_gva_to_gfn(struct vcpu *v,
         const struct paging_mode *mode;
         uint8_t l1_p2ma;
         unsigned int l1_page_order;
+        struct npfec npfec = {
+            .read_access  = 1,
+            .write_access = *pfec & PFEC_write_access,
+            .insn_fetch   = *pfec & PFEC_insn_fetch,
+        };
         int rv;
 
         /* translate l2 guest va into l2 guest gfn */
@@ -1940,11 +1945,8 @@ unsigned long paging_gva_to_gfn(struct vcpu *v,
         if ( l2_gfn == gfn_x(INVALID_GFN) )
             return gfn_x(INVALID_GFN);
 
-        rv = nestedhap_walk_L1_p2m(v, pfn_to_paddr(l2_gfn), &l1_gpa,
-                                   &l1_page_order, &l1_p2ma,
-                                   1,
-                                   !!(*pfec & PFEC_write_access),
-                                   !!(*pfec & PFEC_insn_fetch));
+        rv = nhvm_hap_walk_L1_p2m(
+            v, pfn_to_paddr(l2_gfn), &l1_gpa, &l1_page_order, &l1_p2ma, npfec);
 
         if ( rv != NESTEDHVM_PAGEFAULT_DONE )
             return gfn_x(INVALID_GFN);
--
generated by git-patchbot for /home/xen/git/xen.git#staging