[Xen-devel] [PATCH v5 1/4] VT-d PI: track the number of vcpus on pi blocking list
This patch adds a field, 'counter', to struct vmx_pi_blocking_vcpu to track
how many entries are on the per-cpu pi blocking list.
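For illustration only (not part of the patch), below is a minimal,
self-contained user-space sketch of the pattern this change relies on: list
membership changes only under the list's lock, while the entry count can be
sampled atomically without taking that lock. The names here (struct
blocking_list, bl_add, bl_del) are invented for the example, and C11 atomics
plus a pthread mutex stand in for Xen's add_sized()/read_atomic() and the
per-cpu spinlock.

/* Illustrative sketch of the counter pattern; not Xen code. */
#include <assert.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct entry {
    struct entry *next;
};

struct blocking_list {
    pthread_mutex_t lock;   /* protects 'head' */
    struct entry *head;
    atomic_uint counter;    /* number of entries; readable without 'lock' */
};

static void bl_add(struct blocking_list *bl, struct entry *e)
{
    pthread_mutex_lock(&bl->lock);
    e->next = bl->head;
    bl->head = e;
    atomic_fetch_add_explicit(&bl->counter, 1, memory_order_relaxed);
    pthread_mutex_unlock(&bl->lock);
}

static void bl_del(struct blocking_list *bl, struct entry *e)
{
    struct entry **pp;

    pthread_mutex_lock(&bl->lock);
    assert(atomic_load(&bl->counter) > 0);
    for ( pp = &bl->head; *pp; pp = &(*pp)->next )
        if ( *pp == e )
        {
            *pp = e->next;
            break;
        }
    atomic_fetch_sub_explicit(&bl->counter, 1, memory_order_relaxed);
    pthread_mutex_unlock(&bl->lock);
}

int main(void)
{
    struct blocking_list bl = { .lock = PTHREAD_MUTEX_INITIALIZER };
    struct entry a, b;

    bl_add(&bl, &a);
    bl_add(&bl, &b);
    printf("entries: %u\n", atomic_load(&bl.counter));   /* prints 2 */
    bl_del(&bl, &a);
    printf("entries: %u\n", atomic_load(&bl.counter));   /* prints 1 */
    return 0;
}

The relaxed atomic add/sub mirrors the add_sized()/read_atomic() pairing in
the patch: writers always hold the list lock, so the ASSERTs see a consistent
value, while a lockless reader would only get a best-effort snapshot of the
count.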
Signed-off-by: Chao Gao <chao.gao@xxxxxxxxx>
---
v5:
- introduce two functions for adding vcpus to and removing them from the pi
  blocking list.
- check the sanity of the vcpu count on the pi blocking list.
v4:
- non-trace part of Patch 1 in v3
---
xen/arch/x86/hvm/vmx/vmx.c | 42 ++++++++++++++++++++++++++++++++----------
1 file changed, 32 insertions(+), 10 deletions(-)
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 67fc85b..bf17988 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -83,6 +83,7 @@ static int vmx_vmfunc_intercept(struct cpu_user_regs *regs);
 struct vmx_pi_blocking_vcpu {
     struct list_head list;
     spinlock_t lock;
+    unsigned int counter;
 };
 
 /*
@@ -100,6 +101,24 @@ void vmx_pi_per_cpu_init(unsigned int cpu)
     spin_lock_init(&per_cpu(vmx_pi_blocking, cpu).lock);
 }
 
+static void vmx_pi_add_vcpu(struct pi_blocking_vcpu *pbv,
+                            struct vmx_pi_blocking_vcpu *vpbv)
+{
+    ASSERT(spin_is_locked(&vpbv->lock));
+    add_sized(&vpbv->counter, 1);
+    ASSERT(read_atomic(&vpbv->counter));
+    list_add_tail(&pbv->list, &vpbv->list);
+}
+
+static void vmx_pi_del_vcpu(struct pi_blocking_vcpu *pbv,
+                            struct vmx_pi_blocking_vcpu *vpbv)
+{
+    ASSERT(spin_is_locked(&vpbv->lock));
+    ASSERT(read_atomic(&vpbv->counter));
+    list_del(&pbv->list);
+    add_sized(&vpbv->counter, -1);
+}
+
 static void vmx_vcpu_block(struct vcpu *v)
 {
     unsigned long flags;
@@ -120,8 +139,8 @@ static void vmx_vcpu_block(struct vcpu *v)
      */
     ASSERT(old_lock == NULL);
 
-    list_add_tail(&v->arch.hvm_vmx.pi_blocking.list,
-                  &per_cpu(vmx_pi_blocking, v->processor).list);
+    vmx_pi_add_vcpu(&v->arch.hvm_vmx.pi_blocking,
+                    &per_cpu(vmx_pi_blocking, v->processor));
     spin_unlock_irqrestore(pi_blocking_list_lock, flags);
 
     ASSERT(!pi_test_sn(pi_desc));
@@ -186,7 +205,9 @@ static void vmx_pi_unblock_vcpu(struct vcpu *v)
     if ( v->arch.hvm_vmx.pi_blocking.lock != NULL )
     {
         ASSERT(v->arch.hvm_vmx.pi_blocking.lock == pi_blocking_list_lock);
-        list_del(&v->arch.hvm_vmx.pi_blocking.list);
+        vmx_pi_del_vcpu(&v->arch.hvm_vmx.pi_blocking,
+                        container_of(pi_blocking_list_lock,
+                                     struct vmx_pi_blocking_vcpu, lock));
         v->arch.hvm_vmx.pi_blocking.lock = NULL;
     }
 
@@ -234,7 +255,7 @@ void vmx_pi_desc_fixup(unsigned int cpu)
          */
         if ( pi_test_on(&vmx->pi_desc) )
         {
-            list_del(&vmx->pi_blocking.list);
+            vmx_pi_del_vcpu(&vmx->pi_blocking, &per_cpu(vmx_pi_blocking, cpu));
             vmx->pi_blocking.lock = NULL;
             vcpu_unblock(container_of(vmx, struct vcpu, arch.hvm_vmx));
         }
@@ -257,8 +278,9 @@ void vmx_pi_desc_fixup(unsigned int cpu)
             write_atomic(&vmx->pi_desc.ndst,
                          x2apic_enabled ? dest : MASK_INSR(dest,
                                                            PI_xAPIC_NDST_MASK));
-            list_move(&vmx->pi_blocking.list,
-                      &per_cpu(vmx_pi_blocking, new_cpu).list);
+            vmx_pi_del_vcpu(&vmx->pi_blocking, &per_cpu(vmx_pi_blocking, cpu));
+            vmx_pi_add_vcpu(&vmx->pi_blocking,
+                            &per_cpu(vmx_pi_blocking, new_cpu));
             vmx->pi_blocking.lock = new_lock;
 
             spin_unlock(new_lock);
@@ -2351,9 +2373,9 @@ static struct hvm_function_table __initdata vmx_function_table = {
 static void pi_wakeup_interrupt(struct cpu_user_regs *regs)
 {
     struct arch_vmx_struct *vmx, *tmp;
-    spinlock_t *lock = &per_cpu(vmx_pi_blocking, smp_processor_id()).lock;
-    struct list_head *blocked_vcpus =
-        &per_cpu(vmx_pi_blocking, smp_processor_id()).list;
+    unsigned int cpu = smp_processor_id();
+    spinlock_t *lock = &per_cpu(vmx_pi_blocking, cpu).lock;
+    struct list_head *blocked_vcpus = &per_cpu(vmx_pi_blocking, cpu).list;
 
     ack_APIC_irq();
     this_cpu(irq_count)++;
@@ -2369,7 +2391,7 @@ static void pi_wakeup_interrupt(struct cpu_user_regs *regs)
     {
         if ( pi_test_on(&vmx->pi_desc) )
         {
-            list_del(&vmx->pi_blocking.list);
+            vmx_pi_del_vcpu(&vmx->pi_blocking, &per_cpu(vmx_pi_blocking, cpu));
             ASSERT(vmx->pi_blocking.lock == lock);
             vmx->pi_blocking.lock = NULL;
             vcpu_unblock(container_of(vmx, struct vcpu, arch.hvm_vmx));
--
1.8.3.1
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel