|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH v6 14/18] vmx: posted-interrupt handling when vCPU is blocked
This patch includes the following aspects:
- Add a global vector to wake up the blocked vCPU
when an interrupt is being posted to it (This
part was suggested by Yang Zhang <yang.z.zhang@xxxxxxxxx>).
- Adds a new per-vCPU tasklet to wakeup the blocked
vCPU. It can be used in the case vcpu_unblock
cannot be called directly.
- Define two per-cpu variables:
* pi_blocked_vcpu:
A list storing the vCPUs which were blocked on this pCPU.
* pi_blocked_vcpu_lock:
The spinlock to protect pi_blocked_vcpu.
CC: Kevin Tian <kevin.tian@xxxxxxxxx>
CC: Keir Fraser <keir@xxxxxxx>
CC: Jan Beulich <jbeulich@xxxxxxxx>
CC: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Signed-off-by: Feng Wu <feng.wu@xxxxxxxxx>
---
v6:
- Fix some typos
- Ack the interrupt right after the spin_unlock in pi_wakeup_interrupt()
v4:
- Use local variables in pi_wakeup_interrupt()
- Remove vcpu from the blocked list when pi_desc.on==1, this
  avoids kicking the vcpu multiple times.
- Remove tasklet
v3:
- This patch is generated by merging the following three patches in v2:
[RFC v2 09/15] Add a new per-vCPU tasklet to wakeup the blocked vCPU
[RFC v2 10/15] vmx: Define two per-cpu variables
[RFC v2 11/15] vmx: Add a global wake-up vector for VT-d Posted-Interrupts
- rename 'vcpu_wakeup_tasklet' to 'pi_vcpu_wakeup_tasklet'
- Move the definition of 'pi_vcpu_wakeup_tasklet' to 'struct arch_vmx_struct'
- rename 'vcpu_wakeup_tasklet_handler' to 'pi_vcpu_wakeup_tasklet_handler'
- Make pi_wakeup_interrupt() static
- Rename 'blocked_vcpu_list' to 'pi_blocked_vcpu_list'
- move 'pi_blocked_vcpu_list' to 'struct arch_vmx_struct'
- Rename 'blocked_vcpu' to 'pi_blocked_vcpu'
- Rename 'blocked_vcpu_lock' to 'pi_blocked_vcpu_lock'
xen/arch/x86/hvm/vmx/vmcs.c | 3 ++
xen/arch/x86/hvm/vmx/vmx.c | 64 ++++++++++++++++++++++++++++++++++++++
xen/include/asm-x86/hvm/vmx/vmcs.h | 3 ++
xen/include/asm-x86/hvm/vmx/vmx.h | 5 +++
4 files changed, 75 insertions(+)
diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
index 28c553f..2dabf16 100644
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -661,6 +661,9 @@ int vmx_cpu_up(void)
if ( cpu_has_vmx_vpid )
vpid_sync_all();
+ INIT_LIST_HEAD(&per_cpu(pi_blocked_vcpu, cpu));
+ spin_lock_init(&per_cpu(pi_blocked_vcpu_lock, cpu));
+
return 0;
}
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 2c1c770..9cde9a4 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -83,7 +83,15 @@ static int vmx_msr_write_intercept(unsigned int msr,
uint64_t msr_content);
static void vmx_invlpg_intercept(unsigned long vaddr);
static int vmx_vmfunc_intercept(struct cpu_user_regs *regs);
+/*
+ * We maintain a per-CPU linked list of vCPUs, so in the PI wakeup
+ * handler we can find which vCPU should be woken up.
+ */
+DEFINE_PER_CPU(struct list_head, pi_blocked_vcpu);
+DEFINE_PER_CPU(spinlock_t, pi_blocked_vcpu_lock);
+
uint8_t __read_mostly posted_intr_vector;
+uint8_t __read_mostly pi_wakeup_vector;
static int vmx_domain_initialise(struct domain *d)
{
@@ -106,6 +114,9 @@ static int vmx_vcpu_initialise(struct vcpu *v)
spin_lock_init(&v->arch.hvm_vmx.vmcs_lock);
+ INIT_LIST_HEAD(&v->arch.hvm_vmx.pi_blocked_vcpu_list);
+ INIT_LIST_HEAD(&v->arch.hvm_vmx.pi_vcpu_on_set_list);
+
v->arch.schedule_tail = vmx_do_resume;
v->arch.ctxt_switch_from = vmx_ctxt_switch_from;
v->arch.ctxt_switch_to = vmx_ctxt_switch_to;
@@ -1976,6 +1987,54 @@ static struct hvm_function_table __initdata
vmx_function_table = {
.altp2m_vcpu_emulate_vmfunc = vmx_vcpu_emulate_vmfunc,
};
+/*
+ * Handle VT-d posted-interrupts when the vCPU is blocked.
+ */
+static void pi_wakeup_interrupt(struct cpu_user_regs *regs)
+{
+ struct arch_vmx_struct *vmx, *tmp;
+ struct vcpu *v;
+ spinlock_t *lock = &this_cpu(pi_blocked_vcpu_lock);
+ struct list_head *blocked_vcpus = &this_cpu(pi_blocked_vcpu);
+ LIST_HEAD(list);
+
+ spin_lock(lock);
+
+ /*
+ * XXX: The length of the list depends on how many vCPUs are currently
+ * blocked on this specific pCPU. This may hurt the interrupt latency
+ * if the list grows to too many entries.
+ */
+ list_for_each_entry_safe(vmx, tmp, blocked_vcpus, pi_blocked_vcpu_list)
+ {
+ if ( pi_test_on(&vmx->pi_desc) )
+ {
+ list_del_init(&vmx->pi_blocked_vcpu_list);
+
+ /*
+ * We cannot call vcpu_unblock here, since it also needs
+ * 'pi_blocked_vcpu_lock'. Instead, we store the vCPUs with ON
+ * set in another list and unblock them after we release
+ * 'pi_blocked_vcpu_lock'.
+ */
+ list_add_tail(&vmx->pi_vcpu_on_set_list, &list);
+ }
+ }
+
+ spin_unlock(lock);
+
+ ack_APIC_irq();
+
+ list_for_each_entry_safe(vmx, tmp, &list, pi_vcpu_on_set_list)
+ {
+ v = container_of(vmx, struct vcpu, arch.hvm_vmx);
+ list_del_init(&vmx->pi_vcpu_on_set_list);
+ vcpu_unblock(v);
+ }
+
+ this_cpu(irq_count)++;
+}
+
const struct hvm_function_table * __init start_vmx(void)
{
set_in_cr4(X86_CR4_VMXE);
@@ -2013,7 +2072,12 @@ const struct hvm_function_table * __init start_vmx(void)
}
if ( cpu_has_vmx_posted_intr_processing )
+ {
alloc_direct_apic_vector(&posted_intr_vector, event_check_interrupt);
+
+ if ( iommu_intpost )
+ alloc_direct_apic_vector(&pi_wakeup_vector, pi_wakeup_interrupt);
+ }
else
{
vmx_function_table.deliver_posted_intr = NULL;
diff --git a/xen/include/asm-x86/hvm/vmx/vmcs.h
b/xen/include/asm-x86/hvm/vmx/vmcs.h
index 7e81752..9a986d0 100644
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h
@@ -161,6 +161,9 @@ struct arch_vmx_struct {
struct page_info *vmwrite_bitmap;
struct page_info *pml_pg;
+
+ struct list_head pi_blocked_vcpu_list;
+ struct list_head pi_vcpu_on_set_list;
};
int vmx_create_vmcs(struct vcpu *v);
diff --git a/xen/include/asm-x86/hvm/vmx/vmx.h
b/xen/include/asm-x86/hvm/vmx/vmx.h
index 03c529c..3fd66ba 100644
--- a/xen/include/asm-x86/hvm/vmx/vmx.h
+++ b/xen/include/asm-x86/hvm/vmx/vmx.h
@@ -28,6 +28,11 @@
#include <asm/hvm/trace.h>
#include <asm/hvm/vmx/vmcs.h>
+DECLARE_PER_CPU(struct list_head, pi_blocked_vcpu);
+DECLARE_PER_CPU(spinlock_t, pi_blocked_vcpu_lock);
+
+extern uint8_t pi_wakeup_vector;
+
typedef union {
struct {
u64 r : 1, /* bit 0 - Read permission */
--
2.1.0
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |