
[Xen-devel] [PATCH v2 3/5] VMX: Add posted interrupt support



From: Yang Zhang <yang.z.zhang@xxxxxxxxx>

Add support for using posted interrupts to deliver interrupts.

Signed-off-by: Yang Zhang <yang.z.zhang@xxxxxxxxx>
Reviewed-by: Jun Nakajima <jun.nakajima@xxxxxxxxx>
---
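For reviewers, here is a rough sketch of how the new hooks are expected to be
used from the vlapic side. The actual wiring is done later in this series; the
function below and its call site are hypothetical, only deliver_posted_intr,
sync_pir_to_irr, vlapic_set_tmr and vlapic_set_vector come from this patch:

    /* Hypothetical caller-side flow in xen/arch/x86/hvm/vlapic.c. */
    static void example_accept_irq(struct vlapic *vlapic, uint8_t vec,
                                   uint8_t trig)
    {
        struct vcpu *v = vlapic_vcpu(vlapic);

        if ( trig )
            vlapic_set_tmr(vlapic, vec, trig);   /* declared by this patch */

        if ( hvm_funcs.deliver_posted_intr )
            /* Post the vector: set the PIR bit, set ON, notify the CPU. */
            hvm_funcs.deliver_posted_intr(v, vec);
        else
        {
            vlapic_set_vector(vec, &vlapic->regs->data[APIC_IRR]);
            vcpu_kick(v);
        }
    }

    /* On the VM entry path, pending posted interrupts are folded into IRR: */
    if ( hvm_funcs.sync_pir_to_irr )
        hvm_funcs.sync_pir_to_irr(v);
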
 xen/arch/x86/hvm/vlapic.c         |   18 ----------
 xen/arch/x86/hvm/vmx/vmx.c        |   65 +++++++++++++++++++++++++++++++++++++
 xen/include/asm-x86/bitops.h      |   10 ++++++
 xen/include/asm-x86/hvm/hvm.h     |    2 +
 xen/include/asm-x86/hvm/vlapic.h  |   18 ++++++++++
 xen/include/asm-x86/hvm/vmx/vmx.h |   27 +++++++++++++++
 6 files changed, 122 insertions(+), 18 deletions(-)

diff --git a/xen/arch/x86/hvm/vlapic.c b/xen/arch/x86/hvm/vlapic.c
index 4b25cc8..f241a7c 100644
--- a/xen/arch/x86/hvm/vlapic.c
+++ b/xen/arch/x86/hvm/vlapic.c
@@ -90,24 +90,6 @@ static const unsigned int vlapic_lvt_mask[VLAPIC_LVT_NUM] =
     ((vlapic_get_reg(vlapic, APIC_LVTT) & APIC_TIMER_MODE_MASK) \
      == APIC_TIMER_MODE_TSC_DEADLINE)
 
-
-/*
- * Generic APIC bitmap vector update & search routines.
- */
-
-#define VEC_POS(v) ((v)%32)
-#define REG_POS(v) (((v)/32) * 0x10)
-#define vlapic_test_and_set_vector(vec, bitmap)                         \
-    test_and_set_bit(VEC_POS(vec),                                      \
-                     (unsigned long *)((bitmap) + REG_POS(vec)))
-#define vlapic_test_and_clear_vector(vec, bitmap)                       \
-    test_and_clear_bit(VEC_POS(vec),                                    \
-                       (unsigned long *)((bitmap) + REG_POS(vec)))
-#define vlapic_set_vector(vec, bitmap)                                  \
-    set_bit(VEC_POS(vec), (unsigned long *)((bitmap) + REG_POS(vec)))
-#define vlapic_clear_vector(vec, bitmap)                                \
-    clear_bit(VEC_POS(vec), (unsigned long *)((bitmap) + REG_POS(vec)))
-
 static int vlapic_find_highest_vector(void *bitmap)
 {
     uint32_t *word = bitmap;
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 8e1c06f..938e653 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -55,6 +55,7 @@
 #include <asm/debugger.h>
 #include <asm/apic.h>
 #include <asm/hvm/nestedhvm.h>
+#include <asm/event.h>
 
 enum handler_return { HNDL_done, HNDL_unhandled, HNDL_exception_raised };
 
@@ -1449,6 +1450,63 @@ static void vmx_process_isr(int isr, struct vcpu *v)
     vmx_vmcs_exit(v);
 }
 
+static void __vmx_deliver_posted_interrupt(struct vcpu *v)
+{
+    bool_t running;
+
+    running = v->is_running;
+    vcpu_unblock(v);
+    if ( running && (in_irq() || (v != current)) )
+    {
+        unsigned int cpu = v->processor;
+
+        if ( !test_and_set_bit(VCPU_KICK_SOFTIRQ, &softirq_pending(cpu))
+                && (cpu != smp_processor_id()) )
+            send_IPI_mask(cpumask_of(cpu), posted_intr_vector);
+    }
+}
+
+static void vmx_deliver_posted_intr(struct vcpu *v, u8 vector)
+{
+    if ( pi_test_and_set_pir(vector, &v->arch.hvm_vmx.pi_desc) )
+        return;
+
+    if ( unlikely(v->arch.hvm_vmx.eoi_exitmap_changed) )
+    {
+        /* If the EOI exit bitmap needs to be changed or the notification
+         * vector cannot be allocated, the interrupt will not be injected
+         * until VM entry, as before.
+         */
+        pi_set_on(&v->arch.hvm_vmx.pi_desc);
+        goto out;
+    }
+
+    if ( !pi_test_and_set_on(&v->arch.hvm_vmx.pi_desc) )
+    {
+        __vmx_deliver_posted_interrupt(v);
+        return;
+    }
+
+ out:
+    vcpu_kick(v);
+}
+
+static void vmx_sync_pir_to_irr(struct vcpu *v)
+{
+    struct vlapic *vlapic = vcpu_vlapic(v);
+    u64 val[4];
+    u32 group, i;
+
+    if ( !pi_test_and_clear_on(&v->arch.hvm_vmx.pi_desc) )
+        return;
+
+    for ( group = 0; group < 4; group++ )
+        val[group] = pi_get_pir(&v->arch.hvm_vmx.pi_desc, group);
+
+    for_each_set_bit(i, val, MAX_VECTOR)
+        vlapic_set_vector(i, &vlapic->regs->data[APIC_IRR]);
+}
+
 static struct hvm_function_table __read_mostly vmx_function_table = {
     .name                 = "VMX",
     .cpu_up_prepare       = vmx_cpu_up_prepare,
@@ -1499,6 +1557,8 @@ static struct hvm_function_table __read_mostly vmx_function_table = {
     .update_eoi_exit_bitmap = vmx_update_eoi_exit_bitmap,
     .virtual_intr_delivery_enabled = vmx_virtual_intr_delivery_enabled,
     .process_isr          = vmx_process_isr,
+    .deliver_posted_intr  = vmx_deliver_posted_intr,
+    .sync_pir_to_irr      = vmx_sync_pir_to_irr,
     .nhvm_hap_walk_L1_p2m = nvmx_hap_walk_L1_p2m,
 };
 
@@ -1528,6 +1588,11 @@ struct hvm_function_table * __init start_vmx(void)
  
     if ( cpu_has_vmx_posted_intr_processing )
        alloc_direct_apic_vector(&posted_intr_vector, event_check_interrupt);
+    else
+    {
+        hvm_funcs.deliver_posted_intr = NULL;
+        hvm_funcs.sync_pir_to_irr = NULL;
+    }
 
     setup_vmcs_dump();
 
diff --git a/xen/include/asm-x86/bitops.h b/xen/include/asm-x86/bitops.h
index 2bbd169..46b6cd1 100644
--- a/xen/include/asm-x86/bitops.h
+++ b/xen/include/asm-x86/bitops.h
@@ -367,6 +367,16 @@ static inline unsigned int __scanbit(unsigned long val, unsigned long max)
   ((off)+(__scanbit(~(((*(const unsigned long *)addr)) >> (off)), size))) : \
   __find_next_zero_bit(addr,size,off)))
 
+/**
+ * for_each_set_bit - iterate over every set bit in a memory region
+ * @bit: The integer iterator
+ * @addr: The address to base the search on
+ * @size: The maximum size to search
+ */
+#define for_each_set_bit(bit, addr, size)                   \
+    for ( (bit) = find_first_bit((addr), (size));           \
+          (bit) < (size);                                   \
+          (bit) = find_next_bit((addr), (size), (bit) + 1) )
 
 /**
  * find_first_set_bit - find the first set bit in @word
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index 2fa2ea5..85dc85b 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -184,6 +184,8 @@ struct hvm_function_table {
     void (*update_eoi_exit_bitmap)(struct vcpu *v, u8 vector, u8 trig);
     int (*virtual_intr_delivery_enabled)(void);
     void (*process_isr)(int isr, struct vcpu *v);
+    void (*deliver_posted_intr)(struct vcpu *v, u8 vector);
+    void (*sync_pir_to_irr)(struct vcpu *v);
 
     /*Walk nested p2m  */
     int (*nhvm_hap_walk_L1_p2m)(struct vcpu *v, paddr_t L2_gpa,
diff --git a/xen/include/asm-x86/hvm/vlapic.h b/xen/include/asm-x86/hvm/vlapic.h
index 101ef57..82a84da 100644
--- a/xen/include/asm-x86/hvm/vlapic.h
+++ b/xen/include/asm-x86/hvm/vlapic.h
@@ -54,6 +54,23 @@
 #define vlapic_x2apic_mode(vlapic)                              \
     ((vlapic)->hw.apic_base_msr & MSR_IA32_APICBASE_EXTD)
 
+/*
+ * Generic APIC bitmap vector update & search routines.
+ */
+
+#define VEC_POS(v) ((v)%32)
+#define REG_POS(v) (((v)/32) * 0x10)
+#define vlapic_test_and_set_vector(vec, bitmap)                         \
+    test_and_set_bit(VEC_POS(vec),                                      \
+                     (unsigned long *)((bitmap) + REG_POS(vec)))
+#define vlapic_test_and_clear_vector(vec, bitmap)                       \
+    test_and_clear_bit(VEC_POS(vec),                                    \
+                       (unsigned long *)((bitmap) + REG_POS(vec)))
+#define vlapic_set_vector(vec, bitmap)                                  \
+    set_bit(VEC_POS(vec), (unsigned long *)((bitmap) + REG_POS(vec)))
+#define vlapic_clear_vector(vec, bitmap)                                \
+    clear_bit(VEC_POS(vec), (unsigned long *)((bitmap) + REG_POS(vec)))
+
 struct vlapic {
     struct hvm_hw_lapic      hw;
     struct hvm_hw_lapic_regs *regs;
@@ -104,6 +121,7 @@ void vlapic_handle_EOI_induced_exit(struct vlapic *vlapic, int vector);
 void vlapic_ipi(struct vlapic *vlapic, uint32_t icr_low, uint32_t icr_high);
 
 int vlapic_apicv_write(struct vcpu *v, unsigned int offset);
+void vlapic_set_tmr(struct vlapic *vlapic, uint8_t vec, uint8_t trig);
 
 struct vlapic *vlapic_lowest_prio(
     struct domain *d, struct vlapic *source,
diff --git a/xen/include/asm-x86/hvm/vmx/vmx.h b/xen/include/asm-x86/hvm/vmx/vmx.h
index d4d6feb..1bceada 100644
--- a/xen/include/asm-x86/hvm/vmx/vmx.h
+++ b/xen/include/asm-x86/hvm/vmx/vmx.h
@@ -99,6 +99,33 @@ void vmx_update_exception_bitmap(struct vcpu *v);
 void vmx_update_cpu_exec_control(struct vcpu *v);
 void vmx_update_secondary_exec_control(struct vcpu *v);
 
+#define POSTED_INTR_ON  0
+static inline int pi_test_and_set_pir(int vector, struct pi_desc *pi_desc)
+{
+    return test_and_set_bit(vector, (unsigned long *)pi_desc->pir);
+}
+
+static inline int pi_test_and_set_on(struct pi_desc *pi_desc)
+{
+    return test_and_set_bit(POSTED_INTR_ON,
+            (unsigned long *)&pi_desc->control);
+}
+
+static inline void pi_set_on(struct pi_desc *pi_desc)
+{
+    set_bit(POSTED_INTR_ON, (unsigned long *)&pi_desc->control);
+}
+
+static inline int pi_test_and_clear_on(struct pi_desc *pi_desc)
+{
+    return test_and_clear_bit(POSTED_INTR_ON,
+            (unsigned long *)&pi_desc->control);
+}
+
+static inline u64 pi_get_pir(struct pi_desc *pi_desc, int group)
+{
+    return xchg(&pi_desc->pir[group], 0);
+}
 
 /*
  * Exit Reasons
-- 
1.7.1

