[Xen-devel] [PATCH v2 01/23] x86: change name of parameter for various invlpg functions



They all incorrectly named their address parameter a virtual address
(va / vaddr), while the value they operate on is a linear address.
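
For reference, the distinction is that on x86 a virtual address is first
translated through segmentation into a linear address, and it is the linear
address that paging (and therefore INVLPG/INVLPGA) operates on.  A minimal
illustrative sketch of that relationship follows; the struct and helper are
hypothetical and not part of Xen:

    /* Illustrative only, not from the Xen tree. */
    #include <stdint.h>

    /* Hypothetical cached segment descriptor. */
    struct seg_desc {
        uint64_t base;    /* segment base applied during segmentation */
        uint32_t limit;
    };

    /*
     * A virtual (effective) address plus the segment base gives the
     * linear address; paging then maps the linear address to a physical
     * one.  INVLPG and INVLPGA take a linear address, hence the rename
     * from "vaddr"/"va" to "linear".
     */
    static inline uint64_t virt_to_linear(const struct seg_desc *seg,
                                          uint64_t vaddr)
    {
        return seg->base + vaddr;  /* base is typically 0 in long mode */
    }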

Requested-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Signed-off-by: Wei Liu <wei.liu2@xxxxxxxxxx>
---
 xen/arch/x86/hvm/svm/svm.c         | 14 +++++++-------
 xen/arch/x86/hvm/vmx/vmx.c         | 12 ++++++------
 xen/arch/x86/mm.c                  | 10 +++++-----
 xen/arch/x86/mm/hap/hap.c          |  2 +-
 xen/arch/x86/mm/shadow/multi.c     | 14 +++++++-------
 xen/arch/x86/mm/shadow/none.c      |  2 +-
 xen/include/asm-x86/hvm/hvm.h      |  6 +++---
 xen/include/asm-x86/hvm/svm/asid.h |  4 ++--
 xen/include/asm-x86/hvm/svm/svm.h  |  4 ++--
 xen/include/asm-x86/paging.h       |  3 ++-
 10 files changed, 36 insertions(+), 35 deletions(-)

diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index 37f782b..cecd4f0 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -2500,18 +2500,18 @@ static void svm_vmexit_do_invalidate_cache(struct cpu_user_regs *regs)
 }
 
 static void svm_invlpga_intercept(
-    struct vcpu *v, unsigned long vaddr, uint32_t asid)
+    struct vcpu *v, unsigned long linear, uint32_t asid)
 {
-    svm_invlpga(vaddr,
+    svm_invlpga(linear,
                 (asid == 0)
                 ? v->arch.hvm_vcpu.n1asid.asid
                 : vcpu_nestedhvm(v).nv_n2asid.asid);
 }
 
-static void svm_invlpg_intercept(unsigned long vaddr)
+static void svm_invlpg_intercept(unsigned long linear)
 {
-    HVMTRACE_LONG_2D(INVLPG, 0, TRC_PAR_LONG(vaddr));
-    paging_invlpg(current, vaddr);
+    HVMTRACE_LONG_2D(INVLPG, 0, TRC_PAR_LONG(linear));
+    paging_invlpg(current, linear);
 }
 
 static bool is_invlpg(const struct x86_emulate_state *state,
@@ -2524,9 +2524,9 @@ static bool is_invlpg(const struct x86_emulate_state *state,
            (ext & 7) == 7;
 }
 
-static void svm_invlpg(struct vcpu *v, unsigned long vaddr)
+static void svm_invlpg(struct vcpu *v, unsigned long linear)
 {
-    svm_asid_g_invlpg(v, vaddr);
+    svm_asid_g_invlpg(v, linear);
 }
 
 static bool svm_get_pending_event(struct vcpu *v, struct x86_event *info)
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 73f0d52..2f4947a 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -75,7 +75,7 @@ static void vmx_wbinvd_intercept(void);
 static void vmx_fpu_dirty_intercept(void);
 static int vmx_msr_read_intercept(unsigned int msr, uint64_t *msr_content);
 static int vmx_msr_write_intercept(unsigned int msr, uint64_t msr_content);
-static void vmx_invlpg(struct vcpu *v, unsigned long vaddr);
+static void vmx_invlpg(struct vcpu *v, unsigned long linear);
 
 struct vmx_pi_blocking_vcpu {
     struct list_head     list;
@@ -2594,16 +2594,16 @@ static void vmx_dr_access(unsigned long exit_qualification,
     vmx_update_cpu_exec_control(v);
 }
 
-static void vmx_invlpg_intercept(unsigned long vaddr)
+static void vmx_invlpg_intercept(unsigned long linear)
 {
-    HVMTRACE_LONG_2D(INVLPG, /*invlpga=*/ 0, TRC_PAR_LONG(vaddr));
-    paging_invlpg(current, vaddr);
+    HVMTRACE_LONG_2D(INVLPG, /*invlpga=*/ 0, TRC_PAR_LONG(linear));
+    paging_invlpg(current, linear);
 }
 
-static void vmx_invlpg(struct vcpu *v, unsigned long vaddr)
+static void vmx_invlpg(struct vcpu *v, unsigned long linear)
 {
     if ( cpu_has_vmx_vpid )
-        vpid_sync_vcpu_gva(v, vaddr);
+        vpid_sync_vcpu_gva(v, linear);
 }
 
 static int vmx_vmfunc_intercept(struct cpu_user_regs *regs)
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 8ac4412..a774458 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -5788,19 +5788,19 @@ const unsigned long *__init get_platform_badpages(unsigned int *array_size)
     return bad_pages;
 }
 
-void paging_invlpg(struct vcpu *v, unsigned long va)
+void paging_invlpg(struct vcpu *v, unsigned long linear)
 {
-    if ( !is_canonical_address(va) )
+    if ( !is_canonical_address(linear) )
         return;
 
     if ( paging_mode_enabled(v->domain) &&
-         !paging_get_hostmode(v)->invlpg(v, va) )
+         !paging_get_hostmode(v)->invlpg(v, linear) )
         return;
 
     if ( is_pv_vcpu(v) )
-        flush_tlb_one_local(va);
+        flush_tlb_one_local(linear);
     else
-        hvm_invlpg(v, va);
+        hvm_invlpg(v, linear);
 }
 
 /* Build a 32bit PSE page table using 4MB pages. */
diff --git a/xen/arch/x86/mm/hap/hap.c b/xen/arch/x86/mm/hap/hap.c
index 812a840..c78da99 100644
--- a/xen/arch/x86/mm/hap/hap.c
+++ b/xen/arch/x86/mm/hap/hap.c
@@ -650,7 +650,7 @@ static int hap_page_fault(struct vcpu *v, unsigned long va,
  * should not be intercepting it.  However, we need to correctly handle
  * getting here from instruction emulation.
  */
-static bool_t hap_invlpg(struct vcpu *v, unsigned long va)
+static bool_t hap_invlpg(struct vcpu *v, unsigned long linear)
 {
     /*
      * Emulate INVLPGA:
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index 9e43533..fe322ec 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -3554,7 +3554,7 @@ propagate:
  * instruction should be issued on the hardware, or false if it's safe not
  * to do so.
  */
-static bool sh_invlpg(struct vcpu *v, unsigned long va)
+static bool sh_invlpg(struct vcpu *v, unsigned long linear)
 {
     mfn_t sl1mfn;
     shadow_l2e_t sl2e;
@@ -3577,14 +3577,14 @@ static bool sh_invlpg(struct vcpu *v, unsigned long va)
     {
         shadow_l3e_t sl3e;
         if ( !(shadow_l4e_get_flags(
-                   sh_linear_l4_table(v)[shadow_l4_linear_offset(va)])
+                   sh_linear_l4_table(v)[shadow_l4_linear_offset(linear)])
                & _PAGE_PRESENT) )
             return false;
         /* This must still be a copy-from-user because we don't have the
          * paging lock, and the higher-level shadows might disappear
          * under our feet. */
         if ( __copy_from_user(&sl3e, (sh_linear_l3_table(v)
-                                      + shadow_l3_linear_offset(va)),
+                                      + shadow_l3_linear_offset(linear)),
                               sizeof (sl3e)) != 0 )
         {
             perfc_incr(shadow_invlpg_fault);
@@ -3594,7 +3594,7 @@ static bool sh_invlpg(struct vcpu *v, unsigned long va)
             return false;
     }
 #else /* SHADOW_PAGING_LEVELS == 3 */
-    if ( !(l3e_get_flags(v->arch.paging.shadow.l3table[shadow_l3_linear_offset(va)])
+    if ( !(l3e_get_flags(v->arch.paging.shadow.l3table[shadow_l3_linear_offset(linear)])
            & _PAGE_PRESENT) )
         // no need to flush anything if there's no SL2...
         return false;
@@ -3603,7 +3603,7 @@ static bool sh_invlpg(struct vcpu *v, unsigned long va)
     /* This must still be a copy-from-user because we don't have the shadow
      * lock, and the higher-level shadows might disappear under our feet. */
     if ( __copy_from_user(&sl2e,
-                          sh_linear_l2_table(v) + shadow_l2_linear_offset(va),
+                          sh_linear_l2_table(v) + shadow_l2_linear_offset(linear),
                           sizeof (sl2e)) != 0 )
     {
         perfc_incr(shadow_invlpg_fault);
@@ -3647,7 +3647,7 @@ static bool sh_invlpg(struct vcpu *v, unsigned long va)
              * feet. */
             if ( __copy_from_user(&sl2e,
                                   sh_linear_l2_table(v)
-                                  + shadow_l2_linear_offset(va),
+                                  + shadow_l2_linear_offset(linear),
                                   sizeof (sl2e)) != 0 )
             {
                 perfc_incr(shadow_invlpg_fault);
@@ -3669,7 +3669,7 @@ static bool sh_invlpg(struct vcpu *v, unsigned long va)
                         && page_is_out_of_sync(pg) ) )
             {
                 shadow_l1e_t *sl1;
-                sl1 = sh_linear_l1_table(v) + shadow_l1_linear_offset(va);
+                sl1 = sh_linear_l1_table(v) + shadow_l1_linear_offset(linear);
                 /* Remove the shadow entry that maps this VA */
                 (void) shadow_set_l1e(d, sl1, shadow_l1e_empty(),
                                       p2m_invalid, sl1mfn);
diff --git a/xen/arch/x86/mm/shadow/none.c b/xen/arch/x86/mm/shadow/none.c
index a8c9604..4de645a 100644
--- a/xen/arch/x86/mm/shadow/none.c
+++ b/xen/arch/x86/mm/shadow/none.c
@@ -37,7 +37,7 @@ static int _page_fault(struct vcpu *v, unsigned long va,
     return 0;
 }
 
-static bool _invlpg(struct vcpu *v, unsigned long va)
+static bool _invlpg(struct vcpu *v, unsigned long linear)
 {
     ASSERT_UNREACHABLE();
     return true;
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index 146720c..78e7900 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -160,7 +160,7 @@ struct hvm_function_table {
 
     int  (*event_pending)(struct vcpu *v);
     bool (*get_pending_event)(struct vcpu *v, struct x86_event *info);
-    void (*invlpg)(struct vcpu *v, unsigned long vaddr);
+    void (*invlpg)(struct vcpu *v, unsigned long linear);
 
     int  (*cpu_up_prepare)(unsigned int cpu);
     void (*cpu_dead)(unsigned int cpu);
@@ -454,9 +454,9 @@ static inline int hvm_event_pending(struct vcpu *v)
     return hvm_funcs.event_pending(v);
 }
 
-static inline void hvm_invlpg(struct vcpu *v, unsigned long va)
+static inline void hvm_invlpg(struct vcpu *v, unsigned long linear)
 {
-    hvm_funcs.invlpg(v, va);
+    hvm_funcs.invlpg(v, linear);
 }
 
 /* These bits in CR4 are owned by the host. */
diff --git a/xen/include/asm-x86/hvm/svm/asid.h b/xen/include/asm-x86/hvm/svm/asid.h
index d3a144c..b16bd04 100644
--- a/xen/include/asm-x86/hvm/svm/asid.h
+++ b/xen/include/asm-x86/hvm/svm/asid.h
@@ -25,11 +25,11 @@
 void svm_asid_init(const struct cpuinfo_x86 *c);
 void svm_asid_handle_vmrun(void);
 
-static inline void svm_asid_g_invlpg(struct vcpu *v, unsigned long g_vaddr)
+static inline void svm_asid_g_invlpg(struct vcpu *v, unsigned long g_linear)
 {
 #if 0
     /* Optimization? */
-    svm_invlpga(g_vaddr, v->arch.hvm_svm.vmcb->guest_asid);
+    svm_invlpga(g_linear, v->arch.hvm_svm.vmcb->guest_asid);
 #endif
 
     /* Safe fallback. Take a new ASID. */
diff --git a/xen/include/asm-x86/hvm/svm/svm.h b/xen/include/asm-x86/hvm/svm/svm.h
index 4e5e142..8166046 100644
--- a/xen/include/asm-x86/hvm/svm/svm.h
+++ b/xen/include/asm-x86/hvm/svm/svm.h
@@ -40,13 +40,13 @@ static inline void svm_vmsave_pa(paddr_t vmcb)
         : : "a" (vmcb) : "memory" );
 }
 
-static inline void svm_invlpga(unsigned long vaddr, uint32_t asid)
+static inline void svm_invlpga(unsigned long linear, uint32_t asid)
 {
     asm volatile (
         ".byte 0x0f,0x01,0xdf"
         : /* output */
         : /* input */
-        "a" (vaddr), "c" (asid));
+        "a" (linear), "c" (asid));
 }
 
 unsigned long *svm_msrbit(unsigned long *msr_bitmap, uint32_t msr);
diff --git a/xen/include/asm-x86/paging.h b/xen/include/asm-x86/paging.h
index f440e3e..b51e170 100644
--- a/xen/include/asm-x86/paging.h
+++ b/xen/include/asm-x86/paging.h
@@ -110,7 +110,8 @@ struct shadow_paging_mode {
 struct paging_mode {
     int           (*page_fault            )(struct vcpu *v, unsigned long va,
                                             struct cpu_user_regs *regs);
-    bool          (*invlpg                )(struct vcpu *v, unsigned long va);
+    bool          (*invlpg                )(struct vcpu *v,
+                                            unsigned long linear);
     unsigned long (*gva_to_gfn            )(struct vcpu *v,
                                             struct p2m_domain *p2m,
                                             unsigned long va,
-- 
git-series 0.9.1
