[Xen-devel] [v3 12/15] vmx: posted-interrupt handling when vCPU is blocked



This patch includes the following aspects:
- Add a global vector to wake up a blocked vCPU
  when an interrupt is posted to it (this part
  was suggested by Yang Zhang <yang.z.zhang@xxxxxxxxx>).
- Add a new per-vCPU tasklet to wake up a blocked
  vCPU. It is used in cases where vcpu_unblock
  cannot be called directly.
- Define two per-cpu variables:
      * pi_blocked_vcpu:
      A list of the vCPUs blocked on this pCPU.

      * pi_blocked_vcpu_lock:
      The spinlock protecting pi_blocked_vcpu.
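
As an illustration of how these pieces are meant to fit together,
below is a minimal sketch of the blocking path, which is handled by
a later patch in this series, not by this one. The vCPU queues
itself on the pCPU's pi_blocked_vcpu list and redirects notification
events to pi_wakeup_vector, so that pi_wakeup_interrupt() fires
instead of the normal posted-interrupt vector. The hook name
vmx_vcpu_block() and the pi_desc.nv field are assumptions based on
the rest of the series, not part of this patch:

    /* Sketch only; not part of this patch. */
    static void vmx_vcpu_block(struct vcpu *v)
    {
        unsigned long flags;
        unsigned int cpu = v->processor;

        /* Queue the vCPU on the pCPU it blocks on. */
        spin_lock_irqsave(&per_cpu(pi_blocked_vcpu_lock, cpu), flags);
        list_add_tail(&v->arch.hvm_vmx.pi_blocked_vcpu_list,
                      &per_cpu(pi_blocked_vcpu, cpu));
        spin_unlock_irqrestore(&per_cpu(pi_blocked_vcpu_lock, cpu), flags);

        /*
         * Route notifications to the wakeup vector while the vCPU is
         * blocked, so pi_wakeup_interrupt() runs and can schedule the
         * per-vCPU wakeup tasklet. 'nv' is assumed to be the
         * notification-vector field of the extended pi_desc.
         */
        v->arch.hvm_vmx.pi_desc.nv = pi_wakeup_vector;
    }

Note the lock is taken with interrupts disabled here, since
pi_wakeup_interrupt() takes the same lock from interrupt context.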

Signed-off-by: Feng Wu <feng.wu@xxxxxxxxx>
---
v3:
- This patch was generated by merging the following three patches in v2:
   [RFC v2 09/15] Add a new per-vCPU tasklet to wakeup the blocked vCPU
   [RFC v2 10/15] vmx: Define two per-cpu variables
   [RFC v2 11/15] vmx: Add a global wake-up vector for VT-d Posted-Interrupts
- Rename 'vcpu_wakeup_tasklet' to 'pi_vcpu_wakeup_tasklet'
- Move the definition of 'pi_vcpu_wakeup_tasklet' to 'struct arch_vmx_struct'
- Rename 'vcpu_wakeup_tasklet_handler' to 'pi_vcpu_wakeup_tasklet_handler'
- Make pi_wakeup_interrupt() static
- Rename 'blocked_vcpu_list' to 'pi_blocked_vcpu_list'
- Move 'pi_blocked_vcpu_list' to 'struct arch_vmx_struct'
- Rename 'blocked_vcpu' to 'pi_blocked_vcpu'
- Rename 'blocked_vcpu_lock' to 'pi_blocked_vcpu_lock'
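
For reference, pi_wakeup_interrupt() below keys off the ON bit of the
VT-d posted-interrupt descriptor. A simplified view of that descriptor,
following the VT-d spec (the exact Xen definition is the pi_desc
extension from an earlier patch in this series; this sketch is only an
approximation of it), is:

    /* Simplified sketch of the 64-byte posted-interrupt descriptor. */
    struct pi_desc_sketch {
        uint32_t pir[8];      /* posted-interrupt requests, 1 bit/vector */
        uint32_t on    :  1;  /* outstanding notification pending        */
        uint32_t sn    :  1;  /* suppress notification                   */
        uint32_t rsvd1 : 14;
        uint32_t nv    :  8;  /* notification vector: posted_intr_vector
                               * while running, pi_wakeup_vector blocked */
        uint32_t rsvd2 :  8;
        uint32_t ndst;        /* notification destination (APIC ID)      */
    };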

 xen/arch/x86/hvm/vmx/vmcs.c        |  3 +++
 xen/arch/x86/hvm/vmx/vmx.c         | 54 ++++++++++++++++++++++++++++++++++++++
 xen/include/asm-x86/hvm/hvm.h      |  1 +
 xen/include/asm-x86/hvm/vmx/vmcs.h |  5 ++++
 xen/include/asm-x86/hvm/vmx/vmx.h  |  5 ++++
 5 files changed, 68 insertions(+)

diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
index 11dc1b5..0c5ce3f 100644
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -631,6 +631,9 @@ int vmx_cpu_up(void)
     if ( cpu_has_vmx_vpid )
         vpid_sync_all();
 
+    INIT_LIST_HEAD(&per_cpu(pi_blocked_vcpu, cpu));
+    spin_lock_init(&per_cpu(pi_blocked_vcpu_lock, cpu));
+
     return 0;
 }
 
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index b94ef6a..7db6009 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -82,7 +82,20 @@ static int vmx_msr_read_intercept(unsigned int msr, uint64_t *msr_content);
 static int vmx_msr_write_intercept(unsigned int msr, uint64_t msr_content);
 static void vmx_invlpg_intercept(unsigned long vaddr);
 
+/*
+ * We maintain a per-CPU list of blocked vCPUs, so the PI wakeup
+ * handler can find which vCPUs should be woken up.
+ */
+DEFINE_PER_CPU(struct list_head, pi_blocked_vcpu);
+DEFINE_PER_CPU(spinlock_t, pi_blocked_vcpu_lock);
+
 uint8_t __read_mostly posted_intr_vector;
+uint8_t __read_mostly pi_wakeup_vector;
+
+static void pi_vcpu_wakeup_tasklet_handler(unsigned long arg)
+{
+    vcpu_unblock((struct vcpu *)arg);
+}
 
 static int vmx_domain_initialise(struct domain *d)
 {
@@ -148,11 +161,19 @@ static int vmx_vcpu_initialise(struct vcpu *v)
     if ( v->vcpu_id == 0 )
         v->arch.user_regs.eax = 1;
 
+    tasklet_init(
+        &v->arch.hvm_vmx.pi_vcpu_wakeup_tasklet,
+        pi_vcpu_wakeup_tasklet_handler,
+        (unsigned long)v);
+
+    INIT_LIST_HEAD(&v->arch.hvm_vmx.pi_blocked_vcpu_list);
+
     return 0;
 }
 
 static void vmx_vcpu_destroy(struct vcpu *v)
 {
+    tasklet_kill(&v->arch.hvm_vmx.pi_vcpu_wakeup_tasklet);
     /*
      * There are cases that domain still remains in log-dirty mode when it is
      * about to be destroyed (ex, user types 'xl destroy <dom>'), in which case
@@ -1848,6 +1869,33 @@ static struct hvm_function_table __initdata vmx_function_table = {
     .enable_msr_exit_interception = vmx_enable_msr_exit_interception,
 };
 
+/*
+ * Handle VT-d posted interrupts that arrive while the vCPU is blocked.
+ */
+static void pi_wakeup_interrupt(struct cpu_user_regs *regs)
+{
+    struct arch_vmx_struct *vmx;
+    unsigned int cpu = smp_processor_id();
+
+    spin_lock(&per_cpu(pi_blocked_vcpu_lock, cpu));
+
+    /*
+     * FIXME: The length of the list depends on how many
+     * vCPUs are currently blocked on this specific pCPU.
+     * This may hurt the interrupt latency if the list
+     * grows too long.
+     */
+    list_for_each_entry(vmx, &per_cpu(pi_blocked_vcpu, cpu),
+                        pi_blocked_vcpu_list)
+        if ( vmx->pi_desc.on )
+            tasklet_schedule(&vmx->pi_vcpu_wakeup_tasklet);
+
+    spin_unlock(&per_cpu(pi_blocked_vcpu_lock, cpu));
+
+    ack_APIC_irq();
+    this_cpu(irq_count)++;
+}
+
 const struct hvm_function_table * __init start_vmx(void)
 {
     set_in_cr4(X86_CR4_VMXE);
@@ -1884,11 +1932,17 @@ const struct hvm_function_table * __init start_vmx(void)
     }
 
     if ( cpu_has_vmx_posted_intr_processing )
+    {
         alloc_direct_apic_vector(&posted_intr_vector, event_check_interrupt);
+
+        if ( iommu_intpost )
+            alloc_direct_apic_vector(&pi_wakeup_vector, pi_wakeup_interrupt);
+    }
     else
     {
         vmx_function_table.deliver_posted_intr = NULL;
         vmx_function_table.sync_pir_to_irr = NULL;
+        vmx_function_table.pi_desc_update = NULL;
     }
 
     if ( cpu_has_vmx_ept
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index 77eeac5..e621c30 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -195,6 +195,7 @@ struct hvm_function_table {
     void (*deliver_posted_intr)(struct vcpu *v, u8 vector);
     void (*sync_pir_to_irr)(struct vcpu *v);
     void (*handle_eoi)(u8 vector);
+    void (*pi_desc_update)(struct vcpu *v, int old_state);
 
     /*Walk nested p2m  */
     int (*nhvm_hap_walk_L1_p2m)(struct vcpu *v, paddr_t L2_gpa,
diff --git a/xen/include/asm-x86/hvm/vmx/vmcs.h b/xen/include/asm-x86/hvm/vmx/vmcs.h
index dedfaef..b6b34d1 100644
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h
@@ -162,6 +162,11 @@ struct arch_vmx_struct {
     struct page_info     *vmwrite_bitmap;
 
     struct page_info     *pml_pg;
+
+    /* Tasklet for pi_vcpu_wakeup_tasklet_handler() */
+    struct tasklet       pi_vcpu_wakeup_tasklet;
+
+    struct list_head     pi_blocked_vcpu_list;
 };
 
 int vmx_create_vmcs(struct vcpu *v);
diff --git a/xen/include/asm-x86/hvm/vmx/vmx.h b/xen/include/asm-x86/hvm/vmx/vmx.h
index 5853563..663af33 100644
--- a/xen/include/asm-x86/hvm/vmx/vmx.h
+++ b/xen/include/asm-x86/hvm/vmx/vmx.h
@@ -29,6 +29,11 @@
 #include <asm/hvm/trace.h>
 #include <asm/hvm/vmx/vmcs.h>
 
+DECLARE_PER_CPU(struct list_head, pi_blocked_vcpu);
+DECLARE_PER_CPU(spinlock_t, pi_blocked_vcpu_lock);
+
+extern uint8_t pi_wakeup_vector;
+
 typedef union {
     struct {
         u64 r       :   1,  /* bit 0 - Read permission */
-- 
2.1.0

