[xen stable-4.14] x86/dpci: EOI interrupt regardless of its masking status



commit a3509dcd0fbb86cfe2e4b32996eed3afc363d7da
Author:     Roger Pau Monné <roger.pau@xxxxxxxxxx>
AuthorDate: Thu Jan 21 16:18:32 2021 +0100
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Thu Jan 21 16:18:32 2021 +0100

    x86/dpci: EOI interrupt regardless of its masking status
    
    Modify hvm_pirq_eoi to always EOI the interrupt if required, instead
    of skipping the EOI when the interrupt is routed through the vIO-APIC
    and the entry is masked at the time the EOI is performed.
    
    A subsequent unmask of the vIO-APIC pin won't EOI the interrupt, so
    the guest OS has to wait for the timeout to expire and the automatic
    EOI to be performed before further interrupts can be delivered on
    that line.
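
    In essence, the patch removes the mask test from the gating condition
    in hvm_pirq_eoi. A condensed sketch of the old logic, based on the
    io.c hunks below (the stop_timer()/pirq_guest_eoi() lines fall between
    the hunks and are assumed from the surrounding function, not shown in
    this patch):

        if ( --pirq_dpci->pending ||
             (ent && ent->fields.mask) ||   /* dropped: a masked vIO-APIC
                                             * entry no longer skips the EOI */
             !pt_irq_need_timer(pirq_dpci->flags) )
            return;

        /* Assumed surrounding code, not visible in the hunks: */
        stop_timer(&pirq_dpci->timer);
        pirq_guest_eoi(pirq);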
    
    This allows the helpers to be simplified, dropping the
    vioapic_redir_entry parameter from all of them.
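
    At the interface level, only the parameter drop is visible (taken
    verbatim from the io.h hunk below):

        /* before */
        void hvm_dpci_eoi(struct domain *d, unsigned int guest_irq,
                          const union vioapic_redir_entry *ent);

        /* after */
        void hvm_dpci_eoi(struct domain *d, unsigned int guest_irq);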
    
    Fixes: ccfe4e08455 ('Intel vt-d specific changes in arch/x86/hvm/vmx/vtd.')
    Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
    Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
    master commit: eb298f32fac5ac362eef30a66a9c9c42724d4ce6
    master date: 2021-01-07 15:10:29 +0100
---
 xen/arch/x86/hvm/vioapic.c   |  2 +-
 xen/arch/x86/hvm/vpic.c      |  3 +--
 xen/drivers/passthrough/io.c | 21 ++++++++-------------
 xen/include/asm-x86/hvm/io.h |  3 +--
 4 files changed, 11 insertions(+), 18 deletions(-)

diff --git a/xen/arch/x86/hvm/vioapic.c b/xen/arch/x86/hvm/vioapic.c
index 52c5c636e3..abb27a5704 100644
--- a/xen/arch/x86/hvm/vioapic.c
+++ b/xen/arch/x86/hvm/vioapic.c
@@ -539,7 +539,7 @@ void vioapic_update_EOI(struct domain *d, u8 vector)
             if ( is_iommu_enabled(d) )
             {
                 spin_unlock(&d->arch.hvm.irq_lock);
-                hvm_dpci_eoi(d, vioapic->base_gsi + pin, ent);
+                hvm_dpci_eoi(d, vioapic->base_gsi + pin);
                 spin_lock(&d->arch.hvm.irq_lock);
             }
 
diff --git a/xen/arch/x86/hvm/vpic.c b/xen/arch/x86/hvm/vpic.c
index 61f4b6784c..fcc3de5c0c 100644
--- a/xen/arch/x86/hvm/vpic.c
+++ b/xen/arch/x86/hvm/vpic.c
@@ -261,8 +261,7 @@ static void vpic_ioport_write(
                 vpic_update_int_output(vpic);
                 vpic_unlock(vpic);
                 hvm_dpci_eoi(current->domain,
-                             hvm_isa_irq_to_gsi((addr >> 7) ? (irq|8) : irq),
-                             NULL);
+                             hvm_isa_irq_to_gsi((addr >> 7) ? (irq | 8) : irq));
                 return; /* bail immediately */
             case 6: /* Set Priority                */
                 vpic->priority_add = (val + 1) & 7;
diff --git a/xen/drivers/passthrough/io.c b/xen/drivers/passthrough/io.c
index 6b1305a3e5..71eaf2c17e 100644
--- a/xen/drivers/passthrough/io.c
+++ b/xen/drivers/passthrough/io.c
@@ -950,8 +950,7 @@ static void hvm_dirq_assist(struct domain *d, struct hvm_pirq_dpci *pirq_dpci)
     spin_unlock(&d->event_lock);
 }
 
-static void hvm_pirq_eoi(struct pirq *pirq,
-                         const union vioapic_redir_entry *ent)
+static void hvm_pirq_eoi(struct pirq *pirq)
 {
     struct hvm_pirq_dpci *pirq_dpci;
 
@@ -968,7 +967,6 @@ static void hvm_pirq_eoi(struct pirq *pirq,
      * since interrupt is still not EOIed
      */
     if ( --pirq_dpci->pending ||
-         (ent && ent->fields.mask) ||
          !pt_irq_need_timer(pirq_dpci->flags) )
         return;
 
@@ -977,19 +975,17 @@ static void hvm_pirq_eoi(struct pirq *pirq,
 }
 
 static void __hvm_dpci_eoi(struct domain *d,
-                           const struct hvm_girq_dpci_mapping *girq,
-                           const union vioapic_redir_entry *ent)
+                           const struct hvm_girq_dpci_mapping *girq)
 {
     struct pirq *pirq = pirq_info(d, girq->machine_gsi);
 
     if ( !hvm_domain_use_pirq(d, pirq) )
         hvm_pci_intx_deassert(d, girq->device, girq->intx);
 
-    hvm_pirq_eoi(pirq, ent);
+    hvm_pirq_eoi(pirq);
 }
 
-static void hvm_gsi_eoi(struct domain *d, unsigned int gsi,
-                        const union vioapic_redir_entry *ent)
+static void hvm_gsi_eoi(struct domain *d, unsigned int gsi)
 {
     struct pirq *pirq = pirq_info(d, gsi);
 
@@ -998,11 +994,10 @@ static void hvm_gsi_eoi(struct domain *d, unsigned int gsi,
         return;
 
     hvm_gsi_deassert(d, gsi);
-    hvm_pirq_eoi(pirq, ent);
+    hvm_pirq_eoi(pirq);
 }
 
-void hvm_dpci_eoi(struct domain *d, unsigned int guest_gsi,
-                  const union vioapic_redir_entry *ent)
+void hvm_dpci_eoi(struct domain *d, unsigned int guest_gsi)
 {
     const struct hvm_irq_dpci *hvm_irq_dpci;
     const struct hvm_girq_dpci_mapping *girq;
@@ -1013,7 +1008,7 @@ void hvm_dpci_eoi(struct domain *d, unsigned int guest_gsi,
     if ( is_hardware_domain(d) )
     {
         spin_lock(&d->event_lock);
-        hvm_gsi_eoi(d, guest_gsi, ent);
+        hvm_gsi_eoi(d, guest_gsi);
         goto unlock;
     }
 
@@ -1030,7 +1025,7 @@ void hvm_dpci_eoi(struct domain *d, unsigned int guest_gsi,
         goto unlock;
 
     list_for_each_entry ( girq, &hvm_irq_dpci->girq[guest_gsi], list )
-        __hvm_dpci_eoi(d, girq, ent);
+        __hvm_dpci_eoi(d, girq);
 
 unlock:
     spin_unlock(&d->event_lock);
diff --git a/xen/include/asm-x86/hvm/io.h b/xen/include/asm-x86/hvm/io.h
index 558426b772..9453b9b2b7 100644
--- a/xen/include/asm-x86/hvm/io.h
+++ b/xen/include/asm-x86/hvm/io.h
@@ -118,8 +118,7 @@ bool handle_mmio_with_translation(unsigned long gla, unsigned long gpfn,
                                   struct npfec);
 bool handle_pio(uint16_t port, unsigned int size, int dir);
 void hvm_interrupt_post(struct vcpu *v, int vector, int type);
-void hvm_dpci_eoi(struct domain *d, unsigned int guest_irq,
-                  const union vioapic_redir_entry *ent);
+void hvm_dpci_eoi(struct domain *d, unsigned int guest_irq);
 void msix_write_completion(struct vcpu *);
 
 #ifdef CONFIG_HVM
--
generated by git-patchbot for /home/xen/git/xen.git#stable-4.14