
[Xen-devel] [PATCH] passthrough: use tasklet to deliver interrupts



This patch enables delivery of pass-through interrupts even when VCPU#0 is
halted, by deferring hvm_dirq_assist() to a tasklet instead of invoking it
from the interrupt-assist path and kicking VCPU#0.

Note: the change to raise_softirq() (converting it from a static inline
 function to a macro) is needed for the ia64 build.
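
For context, the tasklet lifecycle the patch relies on looks roughly like the
sketch below (a minimal illustration assuming the tasklet API declared in
xen/softirq.h; dirq_handler, example_bind, example_raise and example_teardown
are illustrative names, not part of the patch):

    #include <xen/sched.h>     /* struct domain */
    #include <xen/softirq.h>   /* struct tasklet, tasklet_init/schedule/kill */

    static struct tasklet dirq_tasklet;

    /* Runs in softirq context on whichever CPU services the tasklet, so
     * interrupt delivery no longer depends on VCPU#0 being runnable. */
    static void dirq_handler(unsigned long data)
    {
        struct domain *d = (struct domain *)data;
        /* walk the pending-dirq bitmap and inject interrupts into d here */
        (void)d;
    }

    /* Bind time (cf. pt_irq_create_bind_vtd): register the handler. */
    static void example_bind(struct domain *d)
    {
        tasklet_init(&dirq_tasklet, dirq_handler, (unsigned long)d);
    }

    /* Interrupt time (cf. hvm_do_IRQ_dpci): defer delivery to softirq. */
    static void example_raise(void)
    {
        tasklet_schedule(&dirq_tasklet);
    }

    /* Teardown (cf. pci_clean_dpci_irqs): ensure the tasklet has finished
     * before the backing state is freed. */
    static void example_teardown(void)
    {
        tasklet_kill(&dirq_tasklet);
    }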

Signed-off-by: Kouya Shimura <kouya@xxxxxxxxxxxxxx>

diff -r 8af26fef898c xen/arch/ia64/vmx/vmx_fault.c
--- a/xen/arch/ia64/vmx/vmx_fault.c     Fri Jul 24 12:08:54 2009 +0100
+++ b/xen/arch/ia64/vmx/vmx_fault.c     Mon Jul 27 13:54:56 2009 +0900
@@ -305,7 +305,6 @@ void leave_hypervisor_tail(void)
                 viosapic_set_irq(d, callback_irq, 0);
             }
         }
-        hvm_dirq_assist(v);
     }
 
     rmb();
diff -r 8af26fef898c xen/arch/x86/hvm/svm/intr.c
--- a/xen/arch/x86/hvm/svm/intr.c       Fri Jul 24 12:08:54 2009 +0100
+++ b/xen/arch/x86/hvm/svm/intr.c       Mon Jul 27 13:54:56 2009 +0900
@@ -109,7 +109,6 @@ asmlinkage void svm_intr_assist(void)
 
     /* Crank the handle on interrupt state. */
     pt_update_irq(v);
-    hvm_dirq_assist(v);
 
     do {
         intack = hvm_vcpu_has_pending_irq(v);
diff -r 8af26fef898c xen/arch/x86/hvm/vmx/intr.c
--- a/xen/arch/x86/hvm/vmx/intr.c       Fri Jul 24 12:08:54 2009 +0100
+++ b/xen/arch/x86/hvm/vmx/intr.c       Mon Jul 27 13:54:56 2009 +0900
@@ -127,7 +127,6 @@ asmlinkage void vmx_intr_assist(void)
 
     /* Crank the handle on interrupt state. */
     pt_update_irq(v);
-    hvm_dirq_assist(v);
 
     do {
         intack = hvm_vcpu_has_pending_irq(v);
diff -r 8af26fef898c xen/drivers/passthrough/io.c
--- a/xen/drivers/passthrough/io.c      Fri Jul 24 12:08:54 2009 +0100
+++ b/xen/drivers/passthrough/io.c      Mon Jul 27 13:54:56 2009 +0900
@@ -23,6 +23,8 @@
 #include <asm/hvm/irq.h>
 #include <asm/hvm/iommu.h>
 #include <xen/hvm/irq.h>
+
+static void hvm_dirq_assist(unsigned long _d);
 
 static int pt_irq_need_timer(uint32_t flags)
 {
@@ -114,6 +116,8 @@ int pt_irq_create_bind_vtd(
             return -ENOMEM;
         }
         memset(hvm_irq_dpci, 0, sizeof(*hvm_irq_dpci));
+        tasklet_init(&hvm_irq_dpci->dirq_tasklet, 
+                     hvm_dirq_assist, (unsigned long)d);
         hvm_irq_dpci->mirq = xmalloc_array(struct hvm_mirq_dpci_mapping,
                                            d->nr_pirqs);
         hvm_irq_dpci->dirq_mask = xmalloc_array(unsigned long,
@@ -368,18 +372,8 @@ int hvm_do_IRQ_dpci(struct domain *d, un
          !test_bit(mirq, dpci->mapping))
         return 0;
 
-    /*
-     * Set a timer here to avoid situations where the IRQ line is shared, and
-     * the device belonging to the pass-through guest is not yet active. In
-     * this case the guest may not pick up the interrupt (e.g., masked at the
-     * PIC) and we need to detect that.
-     */
     set_bit(mirq, dpci->dirq_mask);
-    if ( pt_irq_need_timer(dpci->mirq[mirq].flags) )
-        set_timer(&dpci->hvm_timer[domain_irq_to_vector(d, mirq)],
-                  NOW() + PT_IRQ_TIME_OUT);
-    vcpu_kick(d->vcpu[0]);
-
+    tasklet_schedule(&dpci->dirq_tasklet);
     return 1;
 }
 
@@ -429,16 +423,15 @@ static int hvm_pci_msi_assert(struct dom
 }
 #endif
 
-void hvm_dirq_assist(struct vcpu *v)
+static void hvm_dirq_assist(unsigned long _d)
 {
     unsigned int irq;
     uint32_t device, intx;
-    struct domain *d = v->domain;
+    struct domain *d = (struct domain *)_d;
     struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
     struct dev_intx_gsi_link *digl;
 
-    if ( !iommu_enabled || (v->vcpu_id != 0) || (hvm_irq_dpci == NULL) )
-        return;
+    ASSERT(hvm_irq_dpci);
 
     for ( irq = find_first_bit(hvm_irq_dpci->dirq_mask, d->nr_pirqs);
           irq < d->nr_pirqs;
@@ -456,9 +449,6 @@ void hvm_dirq_assist(struct vcpu *v)
             continue;
         }
 #endif
-        if ( pt_irq_need_timer(hvm_irq_dpci->mirq[irq].flags) )
-            stop_timer(&hvm_irq_dpci->hvm_timer[domain_irq_to_vector(d, irq)]);
-
         list_for_each_entry ( digl, &hvm_irq_dpci->mirq[irq].digl_list, list )
         {
             device = digl->device;
diff -r 8af26fef898c xen/drivers/passthrough/pci.c
--- a/xen/drivers/passthrough/pci.c     Fri Jul 24 12:08:54 2009 +0100
+++ b/xen/drivers/passthrough/pci.c     Mon Jul 27 13:54:56 2009 +0900
@@ -209,6 +209,8 @@ static void pci_clean_dpci_irqs(struct d
     hvm_irq_dpci = domain_get_irq_dpci(d);
     if ( hvm_irq_dpci != NULL )
     {
+        tasklet_kill(&hvm_irq_dpci->dirq_tasklet);
+
         for ( i = find_first_bit(hvm_irq_dpci->mapping, d->nr_pirqs);
               i < d->nr_pirqs;
               i = find_next_bit(hvm_irq_dpci->mapping, d->nr_pirqs, i + 1) )
diff -r 8af26fef898c xen/include/xen/hvm/irq.h
--- a/xen/include/xen/hvm/irq.h Fri Jul 24 12:08:54 2009 +0100
+++ b/xen/include/xen/hvm/irq.h Mon Jul 27 13:54:56 2009 +0900
@@ -24,6 +24,7 @@
 
 #include <xen/types.h>
 #include <xen/spinlock.h>
+#include <xen/softirq.h>
 #include <asm/irq.h>
 #include <public/hvm/save.h>
 
@@ -88,6 +89,7 @@ struct hvm_irq_dpci {
     /* Record of mapped Links */
     uint8_t link_cnt[NR_LINK];
     struct timer hvm_timer[NR_VECTORS];
+    struct tasklet dirq_tasklet;
 };
 
 /* Modify state of a PCI INTx wire. */
@@ -108,6 +110,4 @@ void hvm_assert_evtchn_irq(struct vcpu *
 void hvm_assert_evtchn_irq(struct vcpu *v);
 void hvm_set_callback_via(struct domain *d, uint64_t via);
 
-void hvm_dirq_assist(struct vcpu *v);
-
 #endif /* __XEN_HVM_IRQ_H__ */
diff -r 8af26fef898c xen/include/xen/softirq.h
--- a/xen/include/xen/softirq.h Fri Jul 24 12:08:54 2009 +0100
+++ b/xen/include/xen/softirq.h Mon Jul 27 13:54:56 2009 +0900
@@ -48,10 +48,7 @@ static inline void cpu_raise_softirq(uns
         smp_send_event_check_cpu(cpu);
 }
 
-static inline void raise_softirq(unsigned int nr)
-{
-    set_bit(nr, &softirq_pending(smp_processor_id()));
-}
+#define raise_softirq(nr) (set_bit((nr), &softirq_pending(smp_processor_id())))
 
 /*
  * TASKLETS -- dynamically-allocatable tasks run in softirq context