[xen master] x86/pass-through: avoid double IRQ unbind during domain cleanup



commit 5b58dad089880127674d460494d1a9d68109b3d7
Author:     Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Thu Apr 30 10:40:59 2020 +0200
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Thu Apr 30 10:40:59 2020 +0200

    x86/pass-through: avoid double IRQ unbind during domain cleanup
    
    XEN_DOMCTL_destroydomain creates a continuation if domain_kill -ERESTARTs.
    In that scenario, it is possible to receive multiple __pirq_guest_unbind
    calls for the same pirq from domain_kill, if the pirq has not yet been
    removed from the domain's pirq_tree, as:
      domain_kill()
        -> domain_relinquish_resources()
          -> pci_release_devices()
            -> pci_clean_dpci_irq()
              -> pirq_guest_unbind()
                -> __pirq_guest_unbind()
    
    Avoid recurring invocations of pirq_guest_unbind() by removing the pIRQ
    from the tree being iterated after the first call there. In case such a
    removed entry still has a softirq outstanding, record it and re-check
    upon re-invocation.
    
    Note that pirq_cleanup_check() gets relaxed beyond what's strictly
    needed here, to avoid introducing an asymmetry there between HVM and PV
    guests.
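
    A minimal, self-contained sketch of the remove-and-record pattern may
    help illustrate the approach; the toy_* names below are hypothetical
    and are not the Xen structures touched by this patch:

      /* Toy model: unbind each entry once, remember an entry whose
       * deferred work ("softirq") is still outstanding, and re-check it
       * when the interrupted cleanup pass is re-invoked. */
      #include <stdbool.h>
      #include <stddef.h>
      #include <stdio.h>

      #define TOY_ERESTART 1

      struct toy_pirq {
          int nr;
          bool in_tree;          /* still present in the domain's "tree" */
          bool softirq_active;   /* deferred work not yet finished */
      };

      struct toy_domain {
          struct toy_pirq pirqs[4];
          struct toy_pirq *pending; /* removed from the tree but still busy */
      };

      static void toy_unbind(struct toy_pirq *p)
      {
          printf("unbinding pirq %d\n", p->nr);
      }

      /* One cleanup pass.  Returns 0 when done, or TOY_ERESTART when the
       * caller should invoke it again later (a continuation). */
      static int toy_cleanup(struct toy_domain *d)
      {
          size_t i;

          /* Re-check an entry recorded by an earlier, interrupted pass. */
          if ( d->pending )
          {
              if ( d->pending->softirq_active )
                  return TOY_ERESTART;
              d->pending = NULL;
          }

          for ( i = 0; i < sizeof(d->pirqs) / sizeof(d->pirqs[0]); i++ )
          {
              struct toy_pirq *p = &d->pirqs[i];

              if ( !p->in_tree )
                  continue;          /* already unbound by an earlier pass */

              toy_unbind(p);
              p->in_tree = false;    /* remove from the tree: unbind only once */

              if ( p->softirq_active )
              {
                  d->pending = p;    /* record it, re-check on re-invocation */
                  return TOY_ERESTART;
              }
          }

          return 0;
      }

      int main(void)
      {
          struct toy_domain d = {
              .pirqs = {
                  { .nr = 1, .in_tree = true },
                  { .nr = 2, .in_tree = true, .softirq_active = true },
                  { .nr = 3, .in_tree = true },
              },
          };

          while ( toy_cleanup(&d) == TOY_ERESTART )
          {
              puts("restart requested");
              d.pirqs[1].softirq_active = false; /* pretend the softirq finished */
          }

          puts("cleanup complete");
          return 0;
      }

    With entries removed from the tree as they are unbound, a restarted
    pass only needs to re-check the one recorded entry instead of unbinding
    the still-busy pirq a second time.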
    
    Reported-by: Varad Gautam <vrd@xxxxxxxxx>
    Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
    Tested-by: Varad Gautam <vrd@xxxxxxxxx>
    Reviewed-by: Paul Durrant <paul@xxxxxxx>
    Reviewed-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
---
 xen/arch/x86/irq.c            |  2 +-
 xen/drivers/passthrough/pci.c | 21 +++++++++++++++++++--
 xen/include/asm-x86/hvm/irq.h |  2 ++
 3 files changed, 22 insertions(+), 3 deletions(-)

diff --git a/xen/arch/x86/irq.c b/xen/arch/x86/irq.c
index cc2eb8e925..a69937c840 100644
--- a/xen/arch/x86/irq.c
+++ b/xen/arch/x86/irq.c
@@ -1323,7 +1323,7 @@ void (pirq_cleanup_check)(struct pirq *pirq, struct domain *d)
     }
 
     if ( radix_tree_delete(&d->pirq_tree, pirq->pirq) != pirq )
-        BUG();
+        BUG_ON(!d->is_dying);
 }
 
 /* Flush all ready EOIs from the top of this CPU's pending-EOI stack. */
diff --git a/xen/drivers/passthrough/pci.c b/xen/drivers/passthrough/pci.c
index 15114e7cbc..5846978890 100644
--- a/xen/drivers/passthrough/pci.c
+++ b/xen/drivers/passthrough/pci.c
@@ -873,7 +873,14 @@ static int pci_clean_dpci_irq(struct domain *d,
         xfree(digl);
     }
 
-    return pt_pirq_softirq_active(pirq_dpci) ? -ERESTART : 0;
+    radix_tree_delete(&d->pirq_tree, dpci_pirq(pirq_dpci)->pirq);
+
+    if ( !pt_pirq_softirq_active(pirq_dpci) )
+        return 0;
+
+    domain_get_irq_dpci(d)->pending_pirq_dpci = pirq_dpci;
+
+    return -ERESTART;
 }
 
 static int pci_clean_dpci_irqs(struct domain *d)
@@ -890,8 +897,18 @@ static int pci_clean_dpci_irqs(struct domain *d)
     hvm_irq_dpci = domain_get_irq_dpci(d);
     if ( hvm_irq_dpci != NULL )
     {
-        int ret = pt_pirq_iterate(d, pci_clean_dpci_irq, NULL);
+        int ret = 0;
+
+        if ( hvm_irq_dpci->pending_pirq_dpci )
+        {
+            if ( pt_pirq_softirq_active(hvm_irq_dpci->pending_pirq_dpci) )
+                 ret = -ERESTART;
+            else
+                 hvm_irq_dpci->pending_pirq_dpci = NULL;
+        }
 
+        if ( !ret )
+            ret = pt_pirq_iterate(d, pci_clean_dpci_irq, NULL);
         if ( ret )
         {
             spin_unlock(&d->event_lock);
diff --git a/xen/include/asm-x86/hvm/irq.h b/xen/include/asm-x86/hvm/irq.h
index 5b7e90c179..d306cfeade 100644
--- a/xen/include/asm-x86/hvm/irq.h
+++ b/xen/include/asm-x86/hvm/irq.h
@@ -158,6 +158,8 @@ struct hvm_irq_dpci {
     DECLARE_BITMAP(isairq_map, NR_ISAIRQS);
     /* Record of mapped Links */
     uint8_t link_cnt[NR_LINK];
+    /* Clean up: Entry with a softirq invocation pending / in progress. */
+    struct hvm_pirq_dpci *pending_pirq_dpci;
 };
 
 /* Machine IRQ to guest device/intx mapping. */
--
generated by git-patchbot for /home/xen/git/xen.git#master