
[Xen-changelog] [xen-unstable] Interrupt remapping to PIRQs in HVM guests



# HG changeset patch
# User Keir Fraser <keir@xxxxxxx>
# Date 1290174204 0
# Node ID 6663214f06acf34fbaa6d5eb3e64f29466c582bb
# Parent  b7ed352fa6100104374000cdbd845bbfc6478f08
Interrupt remapping to PIRQs in HVM guests

This patch allows HVM guests to remap interrupts and MSIs into pirqs;
once the mapping is in place the guest receives the interrupt (or the
MSI) as an event.  The interrupt being remapped can belong either to an
emulated device or to a passthrough device, and we keep track of which.
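
As an illustration (not part of the patch itself), an HVM guest kernel
could drive the new path roughly as sketched below, assuming Linux-style
hypercall wrappers (HYPERVISOR_physdev_op, HYPERVISOR_event_channel_op)
and the public physdev/event-channel interfaces; the helper name
remap_gsi_to_evtchn is hypothetical.  A guest should first check that
XENFEAT_hvm_pirqs (added below) is advertised.

    /* Hypothetical guest-side helper: remap an emulated GSI to a pirq and
     * bind that pirq to an event channel, so the interrupt is delivered as
     * an event instead of through the emulated IOAPIC/PIC. */
    static int remap_gsi_to_evtchn(int gsi, evtchn_port_t *port)
    {
        struct physdev_map_pirq map = {
            .domid = DOMID_SELF,
            .type  = MAP_PIRQ_TYPE_GSI,
            .index = gsi,
            .pirq  = -1,              /* pirq < 0: let Xen pick a free pirq */
        };
        struct evtchn_bind_pirq bind;
        int rc;

        rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map);
        if ( rc )
            return rc;

        bind.pirq  = map.pirq;
        bind.flags = 0;               /* or BIND_PIRQ__WILL_SHARE */
        rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind);
        if ( rc )
            return rc;

        *port = bind.port;            /* deliveries now arrive on this port */
        return 0;
    }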

Signed-off-by: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>
---
 xen/arch/x86/domain.c         |   17 +++
 xen/arch/x86/hvm/hvm.c        |   34 +++++++
 xen/arch/x86/hvm/irq.c        |   35 +++++---
 xen/arch/x86/irq.c            |   91 ++++++++++++++++++++-
 xen/arch/x86/physdev.c        |  181 ++++++++++++++++++++++++++++++------------
 xen/common/event_channel.c    |   32 ++++---
 xen/common/kernel.c           |    3 
 xen/drivers/passthrough/io.c  |   11 ++
 xen/include/asm-x86/domain.h  |    3 
 xen/include/asm-x86/irq.h     |    7 +
 xen/include/public/features.h |    3 
 11 files changed, 341 insertions(+), 76 deletions(-)

diff -r b7ed352fa610 -r 6663214f06ac xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c     Fri Nov 19 13:26:42 2010 +0000
+++ b/xen/arch/x86/domain.c     Fri Nov 19 13:43:24 2010 +0000
@@ -509,6 +509,19 @@ int arch_domain_create(struct domain *d,
             if ( !IO_APIC_IRQ(i) )
                 d->arch.irq_pirq[i] = d->arch.pirq_irq[i] = i;
 
+        if ( is_hvm_domain(d) )
+        {
+            d->arch.pirq_emuirq = xmalloc_array(int, d->nr_pirqs);
+            d->arch.emuirq_pirq = xmalloc_array(int, nr_irqs);
+            if ( !d->arch.pirq_emuirq || !d->arch.emuirq_pirq )
+                goto fail;
+            for (i = 0; i < d->nr_pirqs; i++)
+                d->arch.pirq_emuirq[i] = IRQ_UNBOUND;
+            for (i = 0; i < nr_irqs; i++)
+                d->arch.emuirq_pirq[i] = IRQ_UNBOUND;
+        }
+
+
         if ( (rc = iommu_domain_init(d)) != 0 )
             goto fail;
 
@@ -549,6 +562,8 @@ int arch_domain_create(struct domain *d,
     vmce_destroy_msr(d);
     xfree(d->arch.pirq_irq);
     xfree(d->arch.irq_pirq);
+    xfree(d->arch.pirq_emuirq);
+    xfree(d->arch.emuirq_pirq);
     free_xenheap_page(d->shared_info);
     if ( paging_initialised )
         paging_final_teardown(d);
@@ -600,6 +615,8 @@ void arch_domain_destroy(struct domain *
     free_xenheap_page(d->shared_info);
     xfree(d->arch.pirq_irq);
     xfree(d->arch.irq_pirq);
+    xfree(d->arch.pirq_emuirq);
+    xfree(d->arch.emuirq_pirq);
 }
 
 unsigned long pv_guest_cr4_fixup(const struct vcpu *v, unsigned long guest_cr4)
diff -r b7ed352fa610 -r 6663214f06ac xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c    Fri Nov 19 13:26:42 2010 +0000
+++ b/xen/arch/x86/hvm/hvm.c    Fri Nov 19 13:43:24 2010 +0000
@@ -2440,6 +2440,20 @@ static long hvm_memory_op(int cmd, XEN_G
     return rc;
 }
 
+static long hvm_physdev_op(int cmd, XEN_GUEST_HANDLE(void) arg)
+{
+    switch ( cmd )
+    {
+        case PHYSDEVOP_map_pirq:
+        case PHYSDEVOP_unmap_pirq:
+        case PHYSDEVOP_eoi:
+        case PHYSDEVOP_irq_status_query:
+            return do_physdev_op(cmd, arg);
+        default:
+            return -ENOSYS;
+    }
+}
+
 static long hvm_vcpu_op(
     int cmd, int vcpuid, XEN_GUEST_HANDLE(void) arg)
 {
@@ -2475,6 +2489,7 @@ static hvm_hypercall_t *hvm_hypercall32_
     [ __HYPERVISOR_memory_op ] = (hvm_hypercall_t *)hvm_memory_op,
     [ __HYPERVISOR_grant_table_op ] = (hvm_hypercall_t *)hvm_grant_table_op,
     [ __HYPERVISOR_vcpu_op ] = (hvm_hypercall_t *)hvm_vcpu_op,
+    [ __HYPERVISOR_physdev_op ] = (hvm_hypercall_t *)hvm_physdev_op,
     HYPERCALL(xen_version),
     HYPERCALL(event_channel_op),
     HYPERCALL(sched_op),
@@ -2526,10 +2541,28 @@ static long hvm_vcpu_op_compat32(
     return rc;
 }
 
+static long hvm_physdev_op_compat32(
+    int cmd, XEN_GUEST_HANDLE(void) arg)
+{
+    switch ( cmd )
+    {
+        case PHYSDEVOP_map_pirq:
+        case PHYSDEVOP_unmap_pirq:
+        case PHYSDEVOP_eoi:
+        case PHYSDEVOP_irq_status_query:
+            return compat_physdev_op(cmd, arg);
+        break;
+    default:
+            return -ENOSYS;
+        break;
+    }
+}
+
 static hvm_hypercall_t *hvm_hypercall64_table[NR_hypercalls] = {
     [ __HYPERVISOR_memory_op ] = (hvm_hypercall_t *)hvm_memory_op,
     [ __HYPERVISOR_grant_table_op ] = (hvm_hypercall_t *)hvm_grant_table_op,
     [ __HYPERVISOR_vcpu_op ] = (hvm_hypercall_t *)hvm_vcpu_op,
+    [ __HYPERVISOR_physdev_op ] = (hvm_hypercall_t *)hvm_physdev_op,
     HYPERCALL(xen_version),
     HYPERCALL(event_channel_op),
     HYPERCALL(sched_op),
@@ -2543,6 +2576,7 @@ static hvm_hypercall_t *hvm_hypercall32_
     [ __HYPERVISOR_memory_op ] = (hvm_hypercall_t *)hvm_memory_op_compat32,
     [ __HYPERVISOR_grant_table_op ] = (hvm_hypercall_t *)hvm_grant_table_op_compat32,
     [ __HYPERVISOR_vcpu_op ] = (hvm_hypercall_t *)hvm_vcpu_op_compat32,
+    [ __HYPERVISOR_physdev_op ] = (hvm_hypercall_t *)hvm_physdev_op_compat32,
     HYPERCALL(xen_version),
     HYPERCALL(event_channel_op),
     HYPERCALL(sched_op),
diff -r b7ed352fa610 -r 6663214f06ac xen/arch/x86/hvm/irq.c
--- a/xen/arch/x86/hvm/irq.c    Fri Nov 19 13:26:42 2010 +0000
+++ b/xen/arch/x86/hvm/irq.c    Fri Nov 19 13:43:24 2010 +0000
@@ -23,8 +23,29 @@
 #include <xen/types.h>
 #include <xen/event.h>
 #include <xen/sched.h>
+#include <xen/irq.h>
 #include <asm/hvm/domain.h>
 #include <asm/hvm/support.h>
+
+/* Must be called with hvm_domain->irq_lock held */
+static void assert_irq(struct domain *d, unsigned ioapic_gsi, unsigned pic_irq)
+{
+    int pirq = domain_emuirq_to_pirq(d, ioapic_gsi);
+    if ( pirq != IRQ_UNBOUND )
+    {
+        send_guest_pirq(d, pirq);
+        return;
+    }
+    vioapic_irq_positive_edge(d, ioapic_gsi);
+    vpic_irq_positive_edge(d, pic_irq);
+}
+
+/* Must be called with hvm_domain->irq_lock held */
+static void deassert_irq(struct domain *d, unsigned isa_irq)
+{
+    if ( domain_emuirq_to_pirq(d, isa_irq) != IRQ_UNBOUND )
+        vpic_irq_negative_edge(d, isa_irq);
+}
 
 static void __hvm_pci_intx_assert(
     struct domain *d, unsigned int device, unsigned int intx)
@@ -45,10 +66,7 @@ static void __hvm_pci_intx_assert(
     isa_irq = hvm_irq->pci_link.route[link];
     if ( (hvm_irq->pci_link_assert_count[link]++ == 0) && isa_irq &&
          (hvm_irq->gsi_assert_count[isa_irq]++ == 0) )
-    {
-        vioapic_irq_positive_edge(d, isa_irq);
-        vpic_irq_positive_edge(d, isa_irq);
-    }
+        assert_irq(d, isa_irq, isa_irq);
 }
 
 void hvm_pci_intx_assert(
@@ -77,7 +95,7 @@ static void __hvm_pci_intx_deassert(
     isa_irq = hvm_irq->pci_link.route[link];
     if ( (--hvm_irq->pci_link_assert_count[link] == 0) && isa_irq &&
          (--hvm_irq->gsi_assert_count[isa_irq] == 0) )
-        vpic_irq_negative_edge(d, isa_irq);
+        deassert_irq(d, isa_irq);
 }
 
 void hvm_pci_intx_deassert(
@@ -100,10 +118,7 @@ void hvm_isa_irq_assert(
 
     if ( !__test_and_set_bit(isa_irq, &hvm_irq->isa_irq.i) &&
          (hvm_irq->gsi_assert_count[gsi]++ == 0) )
-    {
-        vioapic_irq_positive_edge(d, gsi);
-        vpic_irq_positive_edge(d, isa_irq);
-    }
+        assert_irq(d, gsi, isa_irq);
 
     spin_unlock(&d->arch.hvm_domain.irq_lock);
 }
@@ -120,7 +135,7 @@ void hvm_isa_irq_deassert(
 
     if ( __test_and_clear_bit(isa_irq, &hvm_irq->isa_irq.i) &&
          (--hvm_irq->gsi_assert_count[gsi] == 0) )
-        vpic_irq_negative_edge(d, isa_irq);
+        deassert_irq(d, isa_irq);
 
     spin_unlock(&d->arch.hvm_domain.irq_lock);
 }
diff -r b7ed352fa610 -r 6663214f06ac xen/arch/x86/irq.c
--- a/xen/arch/x86/irq.c        Fri Nov 19 13:26:42 2010 +0000
+++ b/xen/arch/x86/irq.c        Fri Nov 19 13:43:24 2010 +0000
@@ -1453,7 +1453,11 @@ int get_free_pirq(struct domain *d, int 
     {
         for ( i = 16; i < nr_irqs_gsi; i++ )
             if ( !d->arch.pirq_irq[i] )
-                break;
+            {
+                if ( !is_hvm_domain(d) ||
+                        d->arch.pirq_emuirq[i] == IRQ_UNBOUND )
+                    break;
+            }
         if ( i == nr_irqs_gsi )
             return -ENOSPC;
     }
@@ -1461,7 +1465,11 @@ int get_free_pirq(struct domain *d, int 
     {
         for ( i = d->nr_pirqs - 1; i >= nr_irqs_gsi; i-- )
             if ( !d->arch.pirq_irq[i] )
-                break;
+            {
+                if ( !is_hvm_domain(d) ||
+                        d->arch.pirq_emuirq[i] == IRQ_UNBOUND )
+                    break;
+            }
         if ( i < nr_irqs_gsi )
             return -ENOSPC;
     }
@@ -1792,3 +1800,82 @@ void fixup_irqs(void)
         peoi[sp].ready = 1;
     flush_ready_eoi();
 }
+
+int map_domain_emuirq_pirq(struct domain *d, int pirq, int emuirq)
+{
+    int old_emuirq = IRQ_UNBOUND, old_pirq = IRQ_UNBOUND;
+
+    ASSERT(spin_is_locked(&d->event_lock));
+
+    if ( !is_hvm_domain(d) )
+        return -EINVAL;
+
+    if ( pirq < 0 || pirq >= d->nr_pirqs ||
+            emuirq == IRQ_UNBOUND || emuirq >= (int) nr_irqs )
+    {
+        dprintk(XENLOG_G_ERR, "dom%d: invalid pirq %d or emuirq %d\n",
+                d->domain_id, pirq, emuirq);
+        return -EINVAL;
+    }
+
+    old_emuirq = domain_pirq_to_emuirq(d, pirq);
+    if ( emuirq != IRQ_PT )
+        old_pirq = domain_emuirq_to_pirq(d, emuirq);
+
+    if ( (old_emuirq != IRQ_UNBOUND && (old_emuirq != emuirq) ) ||
+         (old_pirq != IRQ_UNBOUND && (old_pirq != pirq)) )
+    {
+        dprintk(XENLOG_G_WARNING, "dom%d: pirq %d or emuirq %d already mapped\n",
+                d->domain_id, pirq, emuirq);
+        return 0;
+    }
+
+    d->arch.pirq_emuirq[pirq] = emuirq;
+    /* do not store emuirq mappings for pt devices */
+    if ( emuirq != IRQ_PT )
+        d->arch.emuirq_pirq[emuirq] = pirq;
+
+    return 0;
+}
+
+int unmap_domain_pirq_emuirq(struct domain *d, int pirq)
+{
+    int emuirq, ret = 0;
+
+    if ( !is_hvm_domain(d) )
+        return -EINVAL;
+
+    if ( (pirq < 0) || (pirq >= d->nr_pirqs) )
+        return -EINVAL;
+
+    ASSERT(spin_is_locked(&d->event_lock));
+
+    emuirq = domain_pirq_to_emuirq(d, pirq);
+    if ( emuirq == IRQ_UNBOUND )
+    {
+        dprintk(XENLOG_G_ERR, "dom%d: pirq %d not mapped\n",
+                d->domain_id, pirq);
+        ret = -EINVAL;
+        goto done;
+    }
+
+    d->arch.pirq_emuirq[pirq] = IRQ_UNBOUND;
+    d->arch.emuirq_pirq[emuirq] = IRQ_UNBOUND;
+
+ done:
+    return ret;
+}
+
+int hvm_domain_use_pirq(struct domain *d, int pirq)
+{
+    int emuirq;
+    
+    if ( !is_hvm_domain(d) )
+        return 0;
+
+    emuirq = domain_pirq_to_emuirq(d, pirq);
+    if ( emuirq != IRQ_UNBOUND && d->pirq_to_evtchn[pirq] != 0 )
+        return 1;
+    else
+        return 0;
+}
diff -r b7ed352fa610 -r 6663214f06ac xen/arch/x86/physdev.c
--- a/xen/arch/x86/physdev.c    Fri Nov 19 13:26:42 2010 +0000
+++ b/xen/arch/x86/physdev.c    Fri Nov 19 13:43:24 2010 +0000
@@ -27,6 +27,59 @@ ioapic_guest_write(
 ioapic_guest_write(
     unsigned long physbase, unsigned int reg, u32 pval);
 
+static int physdev_hvm_map_pirq(
+    struct domain *d, struct physdev_map_pirq *map)
+{
+    int pirq, ret = 0;
+
+    spin_lock(&d->event_lock);
+    switch ( map->type )
+    {
+    case MAP_PIRQ_TYPE_GSI: {
+        struct hvm_irq_dpci *hvm_irq_dpci;
+        struct hvm_girq_dpci_mapping *girq;
+        uint32_t machine_gsi = 0;
+
+        /* find the machine gsi corresponding to the
+         * emulated gsi */
+        hvm_irq_dpci = domain_get_irq_dpci(d);
+        if ( hvm_irq_dpci )
+        {
+            list_for_each_entry ( girq,
+                                  &hvm_irq_dpci->girq[map->index],
+                                  list )
+                machine_gsi = girq->machine_gsi;
+        }
+        /* found one, this means we are dealing with a pt device */
+        if ( machine_gsi )
+        {
+            map->index = domain_pirq_to_irq(d, machine_gsi);
+            pirq = machine_gsi;
+            ret = (pirq > 0) ? 0 : pirq;
+        }
+        /* we didn't find any, this means we are dealing
+         * with an emulated device */
+        else
+        {
+            pirq = map->pirq;
+            if ( pirq < 0 )
+                pirq = get_free_pirq(d, map->type, map->index);
+            ret = map_domain_emuirq_pirq(d, pirq, map->index);
+        }
+        map->pirq = pirq;
+        break;
+    }
+
+    default:
+        ret = -EINVAL;
+        dprintk(XENLOG_G_WARNING, "map type %d not supported yet\n", map->type);
+        break;
+    }
+
+    spin_unlock(&d->event_lock);
+    return ret;
+}
+
 static int physdev_map_pirq(struct physdev_map_pirq *map)
 {
     struct domain *d;
@@ -43,6 +96,12 @@ static int physdev_map_pirq(struct physd
     if ( d == NULL )
         return -ESRCH;
 
+    if ( map->domid == DOMID_SELF && is_hvm_domain(d) )
+    {
+        ret = physdev_hvm_map_pirq(d, map);
+        goto free_domain;
+    }
+
     if ( !IS_PRIV_FOR(current->domain, d) )
     {
         ret = -EPERM;
@@ -52,55 +111,55 @@ static int physdev_map_pirq(struct physd
     /* Verify or get irq. */
     switch ( map->type )
     {
-        case MAP_PIRQ_TYPE_GSI:
-            if ( map->index < 0 || map->index >= nr_irqs_gsi )
-            {
-                dprintk(XENLOG_G_ERR, "dom%d: map invalid irq %d\n",
-                        d->domain_id, map->index);
-                ret = -EINVAL;
-                goto free_domain;
-            }
-
-            irq = domain_pirq_to_irq(current->domain, map->index);
-            if ( !irq )
-            {
-                if ( IS_PRIV(current->domain) )
-                    irq = map->index;
-                else {
-                    dprintk(XENLOG_G_ERR, "dom%d: map pirq with incorrect irq!\n",
-                            d->domain_id);
-                    ret = -EINVAL;
-                    goto free_domain;
-                }
-            }
-            break;
-
-        case MAP_PIRQ_TYPE_MSI:
-            irq = map->index;
-            if ( irq == -1 )
-                irq = create_irq();
-
-            if ( irq < 0 || irq >= nr_irqs )
-            {
-                dprintk(XENLOG_G_ERR, "dom%d: can't create irq for msi!\n",
+    case MAP_PIRQ_TYPE_GSI:
+        if ( map->index < 0 || map->index >= nr_irqs_gsi )
+        {
+            dprintk(XENLOG_G_ERR, "dom%d: map invalid irq %d\n",
+                    d->domain_id, map->index);
+            ret = -EINVAL;
+            goto free_domain;
+        }
+
+        irq = domain_pirq_to_irq(current->domain, map->index);
+        if ( !irq )
+        {
+            if ( IS_PRIV(current->domain) )
+                irq = map->index;
+            else {
+                dprintk(XENLOG_G_ERR, "dom%d: map pirq with incorrect irq!\n",
                         d->domain_id);
                 ret = -EINVAL;
                 goto free_domain;
             }
-
-            _msi.bus = map->bus;
-            _msi.devfn = map->devfn;
-            _msi.entry_nr = map->entry_nr;
-            _msi.table_base = map->table_base;
-            _msi.irq = irq;
-            map_data = &_msi;
-            break;
-
-        default:
-            dprintk(XENLOG_G_ERR, "dom%d: wrong map_pirq type %x\n",
-                    d->domain_id, map->type);
+        }
+        break;
+
+    case MAP_PIRQ_TYPE_MSI:
+        irq = map->index;
+        if ( irq == -1 )
+            irq = create_irq();
+
+        if ( irq < 0 || irq >= nr_irqs )
+        {
+            dprintk(XENLOG_G_ERR, "dom%d: can't create irq for msi!\n",
+                    d->domain_id);
             ret = -EINVAL;
             goto free_domain;
+        }
+
+        _msi.bus = map->bus;
+        _msi.devfn = map->devfn;
+        _msi.entry_nr = map->entry_nr;
+        _msi.table_base = map->table_base;
+        _msi.irq = irq;
+        map_data = &_msi;
+        break;
+
+    default:
+        dprintk(XENLOG_G_ERR, "dom%d: wrong map_pirq type %x\n",
+                d->domain_id, map->type);
+        ret = -EINVAL;
+        goto free_domain;
     }
 
     spin_lock(&pcidevs_lock);
@@ -148,12 +207,15 @@ static int physdev_map_pirq(struct physd
     if ( ret == 0 )
         map->pirq = pirq;
 
-done:
+    if ( !ret && is_hvm_domain(d) )
+        map_domain_emuirq_pirq(d, pirq, IRQ_PT);
+
+ done:
     spin_unlock(&d->event_lock);
     spin_unlock(&pcidevs_lock);
     if ( (ret != 0) && (map->type == MAP_PIRQ_TYPE_MSI) && (map->index == -1) )
         destroy_irq(irq);
-free_domain:
+ free_domain:
     rcu_unlock_domain(d);
     return ret;
 }
@@ -168,6 +230,14 @@ static int physdev_unmap_pirq(struct phy
 
     if ( d == NULL )
         return -ESRCH;
+
+    if ( is_hvm_domain(d) )
+    {
+        spin_lock(&d->event_lock);
+        ret = unmap_domain_pirq_emuirq(d, unmap->pirq);
+        spin_unlock(&d->event_lock);
+        goto free_domain;
+    }
 
     ret = -EPERM;
     if ( !IS_PRIV_FOR(current->domain, d) )
@@ -179,7 +249,7 @@ static int physdev_unmap_pirq(struct phy
     spin_unlock(&d->event_lock);
     spin_unlock(&pcidevs_lock);
 
-free_domain:
+ free_domain:
     rcu_unlock_domain(d);
     return ret;
 }
@@ -202,7 +272,11 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
             break;
         if ( v->domain->arch.pirq_eoi_map )
             evtchn_unmask(v->domain->pirq_to_evtchn[eoi.irq]);
-        ret = pirq_guest_eoi(v->domain, eoi.irq);
+        if ( !is_hvm_domain(v->domain) ||
+             domain_pirq_to_emuirq(v->domain, eoi.irq) == IRQ_PT )
+            ret = pirq_guest_eoi(v->domain, eoi.irq);
+        else
+            ret = 0;
         break;
     }
 
@@ -257,6 +331,13 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
         if ( (irq < 0) || (irq >= v->domain->nr_pirqs) )
             break;
         irq_status_query.flags = 0;
+        if ( is_hvm_domain(v->domain) &&
+             domain_pirq_to_emuirq(v->domain, irq) != IRQ_PT )
+        {
+            ret = copy_to_guest(arg, &irq_status_query, 1) ? -EFAULT : 0;
+            break;
+        }
+
         /*
          * Even edge-triggered or message-based IRQs can need masking from
          * time to time. If teh guest is not dynamically checking for this
@@ -345,9 +426,9 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
             break;
 
         /* Vector is only used by hypervisor, and dom0 shouldn't
-         touch it in its world, return irq_op.irq as the vecotr,
-         and make this hypercall dummy, and also defer the vector 
-         allocation when dom0 tries to programe ioapic entry. */
+           touch it in its world, return irq_op.irq as the vecotr,
+           and make this hypercall dummy, and also defer the vector 
+           allocation when dom0 tries to programe ioapic entry. */
         irq_op.vector = irq_op.irq;
         ret = 0;
         
diff -r b7ed352fa610 -r 6663214f06ac xen/common/event_channel.c
--- a/xen/common/event_channel.c        Fri Nov 19 13:26:42 2010 +0000
+++ b/xen/common/event_channel.c        Fri Nov 19 13:43:24 2010 +0000
@@ -331,7 +331,7 @@ static long evtchn_bind_pirq(evtchn_bind
     if ( (pirq < 0) || (pirq >= d->nr_pirqs) )
         return -EINVAL;
 
-    if ( !irq_access_permitted(d, pirq) )
+    if ( !is_hvm_domain(d) && !irq_access_permitted(d, pirq) )
         return -EPERM;
 
     spin_lock(&d->event_lock);
@@ -345,12 +345,15 @@ static long evtchn_bind_pirq(evtchn_bind
     chn = evtchn_from_port(d, port);
 
     d->pirq_to_evtchn[pirq] = port;
-    rc = pirq_guest_bind(v, pirq,
-                         !!(bind->flags & BIND_PIRQ__WILL_SHARE));
-    if ( rc != 0 )
-    {
-        d->pirq_to_evtchn[pirq] = 0;
-        goto out;
+    if ( !is_hvm_domain(d) )
+    {
+        rc = pirq_guest_bind(
+            v, pirq, !!(bind->flags & BIND_PIRQ__WILL_SHARE));
+        if ( rc != 0 )
+        {
+            d->pirq_to_evtchn[pirq] = 0;
+            goto out;
+        }
     }
 
     chn->state  = ECS_PIRQ;
@@ -403,7 +406,8 @@ static long __evtchn_close(struct domain
         break;
 
     case ECS_PIRQ:
-        pirq_guest_unbind(d1, chn1->u.pirq.irq);
+        if ( !is_hvm_domain(d1) )
+            pirq_guest_unbind(d1, chn1->u.pirq.irq);
         d1->pirq_to_evtchn[chn1->u.pirq.irq] = 0;
         unlink_pirq_port(chn1, d1->vcpu[chn1->notify_vcpu_id]);
         break;
@@ -662,10 +666,16 @@ int send_guest_pirq(struct domain *d, in
     struct evtchn *chn;
 
     /*
-     * It should not be possible to race with __evtchn_close():
-     * The caller of this function must synchronise with pirq_guest_unbind().
+     * PV guests: It should not be possible to race with __evtchn_close(). The
+     *     caller of this function must synchronise with pirq_guest_unbind().
+     * HVM guests: Port is legitimately zero when the guest disables the
+     *     emulated interrupt/evtchn.
      */
-    ASSERT(port != 0);
+    if ( port == 0 )
+    {
+        BUG_ON(!is_hvm_domain(d));
+        return 0;
+    }
 
     chn = evtchn_from_port(d, port);
     return evtchn_set_pending(d->vcpu[chn->notify_vcpu_id], port);
diff -r b7ed352fa610 -r 6663214f06ac xen/common/kernel.c
--- a/xen/common/kernel.c       Fri Nov 19 13:26:42 2010 +0000
+++ b/xen/common/kernel.c       Fri Nov 19 13:43:24 2010 +0000
@@ -277,7 +277,8 @@ DO(xen_version)(int cmd, XEN_GUEST_HANDL
                              (1U << XENFEAT_gnttab_map_avail_bits);
             else
                 fi.submap |= (1U << XENFEAT_hvm_safe_pvclock) |
-                             (1U << XENFEAT_hvm_callback_vector);
+                             (1U << XENFEAT_hvm_callback_vector) |
+                             (1U << XENFEAT_hvm_pirqs);
 #endif
             break;
         default:
diff -r b7ed352fa610 -r 6663214f06ac xen/drivers/passthrough/io.c
--- a/xen/drivers/passthrough/io.c      Fri Nov 19 13:26:42 2010 +0000
+++ b/xen/drivers/passthrough/io.c      Fri Nov 19 13:43:24 2010 +0000
@@ -375,6 +375,7 @@ int pt_irq_destroy_bind_vtd(
             hvm_irq_dpci->mirq[machine_gsi].dom   = NULL;
             hvm_irq_dpci->mirq[machine_gsi].flags = 0;
             clear_bit(machine_gsi, hvm_irq_dpci->mapping);
+            unmap_domain_pirq_emuirq(d, machine_gsi);
         }
     }
     spin_unlock(&d->event_lock);
@@ -454,7 +455,10 @@ extern int vmsi_deliver(struct domain *d
 extern int vmsi_deliver(struct domain *d, int pirq);
 static int hvm_pci_msi_assert(struct domain *d, int pirq)
 {
-    return vmsi_deliver(d, pirq);
+    if ( hvm_domain_use_pirq(d, pirq) )
+        return send_guest_pirq(d, pirq);
+    else
+        return vmsi_deliver(d, pirq);
 }
 #endif
 
@@ -488,7 +492,10 @@ static void hvm_dirq_assist(unsigned lon
         {
             device = digl->device;
             intx = digl->intx;
-            hvm_pci_intx_assert(d, device, intx);
+            if ( hvm_domain_use_pirq(d, pirq) )
+                send_guest_pirq(d, pirq);
+            else
+                hvm_pci_intx_assert(d, device, intx);
             hvm_irq_dpci->mirq[pirq].pending++;
 
 #ifdef SUPPORT_MSI_REMAPPING
diff -r b7ed352fa610 -r 6663214f06ac xen/include/asm-x86/domain.h
--- a/xen/include/asm-x86/domain.h      Fri Nov 19 13:26:42 2010 +0000
+++ b/xen/include/asm-x86/domain.h      Fri Nov 19 13:43:24 2010 +0000
@@ -261,6 +261,9 @@ struct arch_domain
     /* NB. protected by d->event_lock and by irq_desc[irq].lock */
     int *irq_pirq;
     int *pirq_irq;
+    /* pirq to emulated irq and vice versa */
+    int *emuirq_pirq;
+    int *pirq_emuirq;
 
     /* Shared page for notifying that explicit PIRQ EOI is required. */
     unsigned long *pirq_eoi_map;
diff -r b7ed352fa610 -r 6663214f06ac xen/include/asm-x86/irq.h
--- a/xen/include/asm-x86/irq.h Fri Nov 19 13:26:42 2010 +0000
+++ b/xen/include/asm-x86/irq.h Fri Nov 19 13:43:24 2010 +0000
@@ -114,6 +114,9 @@ int unmap_domain_pirq(struct domain *d, 
 int unmap_domain_pirq(struct domain *d, int pirq);
 int get_free_pirq(struct domain *d, int type, int index);
 void free_domain_pirqs(struct domain *d);
+int map_domain_emuirq_pirq(struct domain *d, int pirq, int irq);
+int unmap_domain_pirq_emuirq(struct domain *d, int pirq);
+int hvm_domain_use_pirq(struct domain *d, int irq);
 
 int  init_irq_data(void);
 
@@ -147,6 +150,10 @@ void irq_set_affinity(struct irq_desc *,
 
 #define domain_pirq_to_irq(d, pirq) ((d)->arch.pirq_irq[pirq])
 #define domain_irq_to_pirq(d, irq) ((d)->arch.irq_pirq[irq])
+#define domain_pirq_to_emuirq(d, pirq) ((d)->arch.pirq_emuirq[pirq])
+#define domain_emuirq_to_pirq(d, emuirq) ((d)->arch.emuirq_pirq[emuirq])
+#define IRQ_UNBOUND -1
+#define IRQ_PT -2
 
 bool_t cpu_has_pending_apic_eoi(void);
 
diff -r b7ed352fa610 -r 6663214f06ac xen/include/public/features.h
--- a/xen/include/public/features.h     Fri Nov 19 13:26:42 2010 +0000
+++ b/xen/include/public/features.h     Fri Nov 19 13:43:24 2010 +0000
@@ -74,6 +74,9 @@
 /* x86: pvclock algorithm is safe to use on HVM */
 #define XENFEAT_hvm_safe_pvclock           9
 
+/* x86: pirq can be used by HVM guests */
+#define XENFEAT_hvm_pirqs           10
+
 #define XENFEAT_NR_SUBMAPS 1
 
 #endif /* __XEN_PUBLIC_FEATURES_H__ */
