[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-changelog] [xen stable-4.8] arm: read/write rank->vcpu atomically



commit c75fe6473b73705c9b9f7d8ecc3d043afef55727
Author:     Stefano Stabellini <sstabellini@xxxxxxxxxx>
AuthorDate: Fri Feb 10 18:05:22 2017 -0800
Commit:     Stefano Stabellini <sstabellini@xxxxxxxxxx>
CommitDate: Thu Mar 16 13:59:51 2017 -0700

    arm: read/write rank->vcpu atomically
    
    We don't need a lock in vgic_get_target_vcpu anymore, solving the
    following lock inversion bug: the rank lock should be taken first, then
    the vgic lock. However, gic_update_one_lr is called with the vgic lock
    held, and it calls vgic_get_target_vcpu, which tries to obtain the rank
    lock.
    
    Coverity-ID: 1381855
    Coverity-ID: 1381853
    
    Signed-off-by: Stefano Stabellini <sstabellini@xxxxxxxxxx>
    Reviewed-by: Julien Grall <julien.grall@xxxxxxx>
---
 xen/arch/arm/vgic-v2.c     |  6 +++---
 xen/arch/arm/vgic-v3.c     |  6 +++---
 xen/arch/arm/vgic.c        | 27 +++++----------------------
 xen/include/asm-arm/vgic.h |  4 +++-
 4 files changed, 14 insertions(+), 29 deletions(-)

diff --git a/xen/arch/arm/vgic-v2.c b/xen/arch/arm/vgic-v2.c
index c6d280e..92188a2 100644
--- a/xen/arch/arm/vgic-v2.c
+++ b/xen/arch/arm/vgic-v2.c
@@ -79,7 +79,7 @@ static uint32_t vgic_fetch_itargetsr(struct vgic_irq_rank *rank,
     offset &= ~(NR_TARGETS_PER_ITARGETSR - 1);
 
     for ( i = 0; i < NR_TARGETS_PER_ITARGETSR; i++, offset++ )
-        reg |= (1 << rank->vcpu[offset]) << (i * NR_BITS_PER_TARGET);
+        reg |= (1 << read_atomic(&rank->vcpu[offset])) << (i * NR_BITS_PER_TARGET);
 
     return reg;
 }
@@ -152,7 +152,7 @@ static void vgic_store_itargetsr(struct domain *d, struct vgic_irq_rank *rank,
         /* The vCPU ID always starts from 0 */
         new_target--;
 
-        old_target = rank->vcpu[offset];
+        old_target = read_atomic(&rank->vcpu[offset]);
 
         /* Only migrate the vIRQ if the target vCPU has changed */
         if ( new_target != old_target )
@@ -162,7 +162,7 @@ static void vgic_store_itargetsr(struct domain *d, struct vgic_irq_rank *rank,
                              virq);
         }
 
-        rank->vcpu[offset] = new_target;
+        write_atomic(&rank->vcpu[offset], new_target);
     }
 }
 
diff --git a/xen/arch/arm/vgic-v3.c b/xen/arch/arm/vgic-v3.c
index ec038a3..2d71cac 100644
--- a/xen/arch/arm/vgic-v3.c
+++ b/xen/arch/arm/vgic-v3.c
@@ -107,7 +107,7 @@ static uint64_t vgic_fetch_irouter(struct vgic_irq_rank *rank,
     /* Get the index in the rank */
     offset &= INTERRUPT_RANK_MASK;
 
-    return vcpuid_to_vaffinity(rank->vcpu[offset]);
+    return vcpuid_to_vaffinity(read_atomic(&rank->vcpu[offset]));
 }
 
 /*
@@ -135,7 +135,7 @@ static void vgic_store_irouter(struct domain *d, struct vgic_irq_rank *rank,
     offset &= virq & INTERRUPT_RANK_MASK;
 
     new_vcpu = vgic_v3_irouter_to_vcpu(d, irouter);
-    old_vcpu = d->vcpu[rank->vcpu[offset]];
+    old_vcpu = d->vcpu[read_atomic(&rank->vcpu[offset])];
 
     /*
      * From the spec (see 8.9.13 in IHI 0069A), any write with an
@@ -153,7 +153,7 @@ static void vgic_store_irouter(struct domain *d, struct vgic_irq_rank *rank,
     if ( new_vcpu != old_vcpu )
         vgic_migrate_irq(old_vcpu, new_vcpu, virq);
 
-    rank->vcpu[offset] = new_vcpu->vcpu_id;
+    write_atomic(&rank->vcpu[offset], new_vcpu->vcpu_id);
 }
 
 static inline bool vgic_reg64_check_access(struct hsr_dabt dabt)
diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c
index 0965119..d12e6f0 100644
--- a/xen/arch/arm/vgic.c
+++ b/xen/arch/arm/vgic.c
@@ -85,7 +85,7 @@ static void vgic_rank_init(struct vgic_irq_rank *rank, uint8_t index,
     rank->index = index;
 
     for ( i = 0; i < NR_INTERRUPT_PER_RANK; i++ )
-        rank->vcpu[i] = vcpu;
+        write_atomic(&rank->vcpu[i], vcpu);
 }
 
 int domain_vgic_register(struct domain *d, int *mmio_count)
@@ -218,28 +218,11 @@ int vcpu_vgic_free(struct vcpu *v)
     return 0;
 }
 
-/* The function should be called by rank lock taken. */
-static struct vcpu *__vgic_get_target_vcpu(struct vcpu *v, unsigned int virq)
-{
-    struct vgic_irq_rank *rank = vgic_rank_irq(v, virq);
-
-    ASSERT(spin_is_locked(&rank->lock));
-
-    return v->domain->vcpu[rank->vcpu[virq & INTERRUPT_RANK_MASK]];
-}
-
-/* takes the rank lock */
 struct vcpu *vgic_get_target_vcpu(struct vcpu *v, unsigned int virq)
 {
-    struct vcpu *v_target;
     struct vgic_irq_rank *rank = vgic_rank_irq(v, virq);
-    unsigned long flags;
-
-    vgic_lock_rank(v, rank, flags);
-    v_target = __vgic_get_target_vcpu(v, virq);
-    vgic_unlock_rank(v, rank, flags);
-
-    return v_target;
+    int target = read_atomic(&rank->vcpu[virq & INTERRUPT_RANK_MASK]);
+    return v->domain->vcpu[target];
 }
 
 static int vgic_get_virq_priority(struct vcpu *v, unsigned int virq)
@@ -326,7 +309,7 @@ void vgic_disable_irqs(struct vcpu *v, uint32_t r, int n)
 
     while ( (i = find_next_bit(&mask, 32, i)) < 32 ) {
         irq = i + (32 * n);
-        v_target = __vgic_get_target_vcpu(v, irq);
+        v_target = vgic_get_target_vcpu(v, irq);
         p = irq_to_pending(v_target, irq);
         clear_bit(GIC_IRQ_GUEST_ENABLED, &p->status);
         gic_remove_from_queues(v_target, irq);
@@ -368,7 +351,7 @@ void vgic_enable_irqs(struct vcpu *v, uint32_t r, int n)
 
     while ( (i = find_next_bit(&mask, 32, i)) < 32 ) {
         irq = i + (32 * n);
-        v_target = __vgic_get_target_vcpu(v, irq);
+        v_target = vgic_get_target_vcpu(v, irq);
         p = irq_to_pending(v_target, irq);
         set_bit(GIC_IRQ_GUEST_ENABLED, &p->status);
         spin_lock_irqsave(&v_target->arch.vgic.lock, flags);
diff --git a/xen/include/asm-arm/vgic.h b/xen/include/asm-arm/vgic.h
index eb06064..51b187f 100644
--- a/xen/include/asm-arm/vgic.h
+++ b/xen/include/asm-arm/vgic.h
@@ -107,7 +107,9 @@ struct vgic_irq_rank {
 
     /*
      * It's more convenient to store a target VCPU per vIRQ
-     * than the register ITARGETSR/IROUTER itself
+     * than the register ITARGETSR/IROUTER itself.
+     * Use atomic operations to read/write the vcpu fields to avoid
+     * taking the rank lock.
      */
     uint8_t vcpu[32];
 };
--
generated by git-patchbot for /home/xen/git/xen.git#stable-4.8

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
https://lists.xenproject.org/xen-changelog

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.