
[Xen-devel] [PATCH v2 01/12] VMX: reduce number of posted-interrupt hooks



Three of the four hooks are not exposed outside of vmx.c, and all of
them have only a single possible non-NULL value. So there's no reason to
use hooks here - a simple set of flag indicators is sufficient (and we
don't even need a flag for the VM entry one, as it's always
(de-)activated together with the vCPU blocking hook, which needs to
remain an actual function pointer). This is all the more so now that,
with the Spectre v2 workarounds, indirect calls have become more
expensive.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
---
v2: Set PI_CSW_TO instead of PI_CSW_FROM in vmx_pi_hooks_deassign().
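
For illustration only, here is a minimal standalone sketch of the pattern
(simplified, hypothetical names - pi_ops, pi_switch_from() etc. stand in for
the real hvm_pi_ops fields and the vmx.c statics; this is not the patched
code itself): the context-switch paths test flag bits and make direct calls,
and the VM entry path reuses the remaining vcpu_block pointer as its
indicator, so no indirect calls are needed on any of these paths.

/* Standalone sketch only - not the Xen code; names are simplified. */
#include <stdio.h>

/* Flag bits standing in for the removed switch_from/switch_to hooks. */
#define PI_CSW_FROM (1u << 0)
#define PI_CSW_TO   (1u << 1)

struct pi_ops {
    unsigned int flags;            /* replaces the two context-switch hooks */
    void (*vcpu_block)(int vcpu);  /* must remain a real function pointer */
};

static void pi_switch_from(int vcpu) { printf("suspend PI, vCPU%d\n", vcpu); }
static void pi_switch_to(int vcpu)   { printf("resume PI, vCPU%d\n", vcpu); }
static void pi_do_resume(int vcpu)   { printf("sync PI desc, vCPU%d\n", vcpu); }
static void pi_vcpu_block(int vcpu)  { printf("block vCPU%d\n", vcpu); }

static void ctxt_switch_from(const struct pi_ops *ops, int vcpu)
{
    /* Direct call guarded by a flag test - no (retpoline'd) indirect branch. */
    if ( ops->flags & PI_CSW_FROM )
        pi_switch_from(vcpu);
}

static void ctxt_switch_to(const struct pi_ops *ops, int vcpu)
{
    if ( ops->flags & PI_CSW_TO )
        pi_switch_to(vcpu);
}

static void vmenter_helper(const struct pi_ops *ops, int vcpu)
{
    /*
     * No separate flag for the VM entry path: the blocking hook is always
     * (de-)assigned together with the resume handling, so its non-NULL-ness
     * already indicates whether to do the resume work.
     */
    if ( ops->vcpu_block )
        pi_do_resume(vcpu);
}

int main(void)
{
    struct pi_ops ops = {
        .flags = PI_CSW_FROM | PI_CSW_TO,  /* as in vmx_pi_hooks_assign() */
        .vcpu_block = pi_vcpu_block,
    };

    ctxt_switch_from(&ops, 0);
    ctxt_switch_to(&ops, 0);
    vmenter_helper(&ops, 0);

    return 0;
}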

--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -77,6 +77,10 @@ static int vmx_msr_read_intercept(unsign
 static int vmx_msr_write_intercept(unsigned int msr, uint64_t msr_content);
 static void vmx_invlpg(struct vcpu *v, unsigned long vaddr);
 
+/* Values for domain's ->arch.hvm_domain.pi_ops.flags. */
+#define PI_CSW_FROM (1u << 0)
+#define PI_CSW_TO   (1u << 1)
+
 struct vmx_pi_blocking_vcpu {
     struct list_head     list;
     spinlock_t           lock;
@@ -329,8 +333,7 @@ void vmx_pi_hooks_assign(struct domain *
      * This can make sure the PI (especially the NDST feild) is
      * in proper state when we call vmx_vcpu_block().
      */
-    d->arch.hvm_domain.pi_ops.switch_from = vmx_pi_switch_from;
-    d->arch.hvm_domain.pi_ops.switch_to = vmx_pi_switch_to;
+    d->arch.hvm_domain.pi_ops.flags = PI_CSW_FROM | PI_CSW_TO;
 
     for_each_vcpu ( d, v )
     {
@@ -346,7 +349,6 @@ void vmx_pi_hooks_assign(struct domain *
     }
 
     d->arch.hvm_domain.pi_ops.vcpu_block = vmx_vcpu_block;
-    d->arch.hvm_domain.pi_ops.do_resume = vmx_pi_do_resume;
 }
 
 /* This function is called when pcidevs_lock is held */
@@ -383,8 +385,7 @@ void vmx_pi_hooks_deassign(struct domain
      * 'switch_to' hook function.
      */
     d->arch.hvm_domain.pi_ops.vcpu_block = NULL;
-    d->arch.hvm_domain.pi_ops.switch_from = NULL;
-    d->arch.hvm_domain.pi_ops.do_resume = NULL;
+    d->arch.hvm_domain.pi_ops.flags = PI_CSW_TO;
 
     for_each_vcpu ( d, v )
         vmx_pi_unblock_vcpu(v);
@@ -934,8 +935,8 @@ static void vmx_ctxt_switch_from(struct
     vmx_restore_host_msrs();
     vmx_save_dr(v);
 
-    if ( v->domain->arch.hvm_domain.pi_ops.switch_from )
-        v->domain->arch.hvm_domain.pi_ops.switch_from(v);
+    if ( v->domain->arch.hvm_domain.pi_ops.flags & PI_CSW_FROM )
+        vmx_pi_switch_from(v);
 }
 
 static void vmx_ctxt_switch_to(struct vcpu *v)
@@ -943,8 +944,8 @@ static void vmx_ctxt_switch_to(struct vc
     vmx_restore_guest_msrs(v);
     vmx_restore_dr(v);
 
-    if ( v->domain->arch.hvm_domain.pi_ops.switch_to )
-        v->domain->arch.hvm_domain.pi_ops.switch_to(v);
+    if ( v->domain->arch.hvm_domain.pi_ops.flags & PI_CSW_TO )
+        vmx_pi_switch_to(v);
 }
 
 
@@ -4330,8 +4331,8 @@ bool vmx_vmenter_helper(const struct cpu
     if ( nestedhvm_vcpu_in_guestmode(curr) && vcpu_nestedhvm(curr).stale_np2m )
         return false;
 
-    if ( curr->domain->arch.hvm_domain.pi_ops.do_resume )
-        curr->domain->arch.hvm_domain.pi_ops.do_resume(curr);
+    if ( curr->domain->arch.hvm_domain.pi_ops.vcpu_block )
+        vmx_pi_do_resume(curr);
 
     if ( !cpu_has_vmx_vpid )
         goto out;
--- a/xen/include/asm-x86/hvm/domain.h
+++ b/xen/include/asm-x86/hvm/domain.h
@@ -80,20 +80,13 @@ struct hvm_ioreq_server {
  *     and actually has a physical device assigned .
  */
 struct hvm_pi_ops {
-    /* Hook into ctx_switch_from. */
-    void (*switch_from)(struct vcpu *v);
-
-    /* Hook into ctx_switch_to. */
-    void (*switch_to)(struct vcpu *v);
+    unsigned int flags;
 
     /*
      * Hook into arch_vcpu_block(), which is called
      * from vcpu_block() and vcpu_do_poll().
      */
     void (*vcpu_block)(struct vcpu *);
-
-    /* Hook into the vmentry path. */
-    void (*do_resume)(struct vcpu *v);
 };
 
 #define MAX_NR_IOREQ_SERVERS 8





