[Xen-changelog] [xen-unstable] MSI 5/6: add MSI support to passthrough HVM domain



# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1209634383 -3600
# Node ID ad55c06c9bbc31e4e3db2944f3a1fcbf842bd4aa
# Parent  a0ebceaf41ff8ebda5e2478c03fd6e382ddc4b7f
MSI 5/6: add MSI support to passthrough HVM domain

Currently it only intercepts accesses to the MSI config space; there is no MSI-X support yet.

Signed-off-by: Jiang Yunhong <yunhong.jiang@xxxxxxxxx>
Signed-off-by: Shan Haitao <haitao.shan@xxxxxxxxx>
---
 tools/ioemu/Makefile.target           |    2 
 tools/ioemu/hw/pass-through.c         |   40 ++
 tools/ioemu/hw/pass-through.h         |    9 
 tools/ioemu/hw/pt-msi.c               |  488 ++++++++++++++++++++++++++++++++++
 tools/ioemu/hw/pt-msi.h               |   65 ++++
 tools/libxc/xc_domain.c               |   26 +
 tools/libxc/xc_physdev.c              |   32 ++
 tools/libxc/xenctrl.h                 |   16 +
 xen/arch/x86/hvm/Makefile             |    1 
 xen/arch/x86/hvm/vlapic.c             |    3 
 xen/arch/x86/hvm/vmsi.c               |  189 +++++++++++++
 xen/arch/x86/hvm/vmx/intr.c           |   12 
 xen/drivers/passthrough/io.c          |  118 +++++---
 xen/drivers/passthrough/iommu.c       |    2 
 xen/drivers/passthrough/vtd/x86/vtd.c |    2 
 xen/include/asm-x86/hvm/io.h          |    1 
 xen/include/asm-x86/hvm/irq.h         |   13 
 xen/include/public/domctl.h           |    7 
 18 files changed, 977 insertions(+), 49 deletions(-)

diff -r a0ebceaf41ff -r ad55c06c9bbc tools/ioemu/Makefile.target
--- a/tools/ioemu/Makefile.target       Thu May 01 10:32:10 2008 +0100
+++ b/tools/ioemu/Makefile.target       Thu May 01 10:33:03 2008 +0100
@@ -370,7 +370,7 @@ endif
 
 ifdef CONFIG_PASSTHROUGH
 LIBS+=-lpci
-VL_OBJS+= pass-through.o
+VL_OBJS+= pass-through.o pt-msi.o
 CFLAGS += -DCONFIG_PASSTHROUGH
 $(info *** PCI passthrough capability has been enabled ***)
 endif
diff -r a0ebceaf41ff -r ad55c06c9bbc tools/ioemu/hw/pass-through.c
--- a/tools/ioemu/hw/pass-through.c     Thu May 01 10:32:10 2008 +0100
+++ b/tools/ioemu/hw/pass-through.c     Thu May 01 10:33:03 2008 +0100
@@ -26,6 +26,7 @@
 #include "pass-through.h"
 #include "pci/header.h"
 #include "pci/pci.h"
+#include "pt-msi.h"
 
 extern FILE *logfile;
 
@@ -286,6 +287,9 @@ static void pt_pci_write_config(PCIDevic
         pci_default_write_config(d, address, val, len);
         return;
     }
+
+    if ( pt_msi_write(assigned_device, address, val, len) )
+        return;
 
     /* PCI config pass-through */
     if (address == 0x4) {
@@ -333,6 +337,7 @@ static uint32_t pt_pci_read_config(PCIDe
         break;
     }
 
+    pt_msi_read(assigned_device, address, len, &val);
 exit:
 
 #ifdef PT_DEBUG_PCI_CONFIG_ACCESS
@@ -445,11 +450,41 @@ static int pt_unregister_regions(struct 
 
 }
 
+uint8_t find_cap_offset(struct pci_dev *pci_dev, uint8_t cap)
+{
+    int id;
+    int max_cap = 48;
+    int pos = PCI_CAPABILITY_LIST;
+    int status;
+
+    status = pci_read_byte(pci_dev, PCI_STATUS);
+    if ( (status & PCI_STATUS_CAP_LIST) == 0 )
+        return 0;
+
+    while ( max_cap-- )
+    {
+        pos = pci_read_byte(pci_dev, pos);
+        if ( pos < 0x40 )
+            break;
+
+        pos &= ~3;
+        id = pci_read_byte(pci_dev, pos + PCI_CAP_LIST_ID);
+
+        if ( id == 0xff )
+            break;
+        if ( id == cap )
+            return pos;
+
+        pos += PCI_CAP_LIST_NEXT;
+    }
+    return 0;
+}
+
 struct pt_dev * register_real_device(PCIBus *e_bus,
         const char *e_dev_name, int e_devfn, uint8_t r_bus, uint8_t r_dev,
         uint8_t r_func, uint32_t machine_irq, struct pci_access *pci_access)
 {
-    int rc = -1, i;
+    int rc = -1, i, pos;
     struct pt_dev *assigned_device = NULL;
     struct pci_dev *pci_dev;
     uint8_t e_device, e_intx;
@@ -510,6 +545,9 @@ struct pt_dev * register_real_device(PCI
     /* Initialize virtualized PCI configuration (Extended 256 Bytes) */
     for ( i = 0; i < PCI_CONFIG_SIZE; i++ )
         assigned_device->dev.config[i] = pci_read_byte(pci_dev, i);
+
+    if ( (pos = find_cap_offset(pci_dev, PCI_CAP_ID_MSI)) )
+        pt_msi_init(assigned_device, pos);
 
     /* Handle real device's MMIO/PIO BARs */
     pt_register_regions(assigned_device);
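
The find_cap_offset() helper added above walks the standard PCI capability list to locate the MSI capability before pt_msi_init() is called. As a rough, self-contained illustration of that walk, the sketch below runs the same loop over an in-memory 256-byte config-space image instead of libpci accessors; the config image contents and the helper name are invented for the example.

    #include <stdint.h>
    #include <stdio.h>

    /* Standard PCI config-space offsets (local copies for the example,
     * not taken from libpci). */
    #define PCI_STATUS            0x06
    #define PCI_STATUS_CAP_LIST   0x10
    #define PCI_CAPABILITY_LIST   0x34
    #define PCI_CAP_LIST_ID       0
    #define PCI_CAP_LIST_NEXT     1
    #define PCI_CAP_ID_MSI        0x05

    /* Walk the capability list in a raw 256-byte config-space image,
     * mirroring the loop in find_cap_offset(). */
    static uint8_t find_cap_in_image(const uint8_t cfg[256], uint8_t cap)
    {
        int max_cap = 48;                  /* guard against looping lists */
        uint8_t pos = PCI_CAPABILITY_LIST;

        if ( !(cfg[PCI_STATUS] & PCI_STATUS_CAP_LIST) )
            return 0;                      /* device has no capability list */

        while ( max_cap-- )
        {
            pos = cfg[pos];                /* follow the next pointer */
            if ( pos < 0x40 )              /* capabilities live at 0x40+ */
                break;
            pos &= ~3;
            if ( cfg[pos + PCI_CAP_LIST_ID] == 0xff )
                break;
            if ( cfg[pos + PCI_CAP_LIST_ID] == cap )
                return pos;
            pos += PCI_CAP_LIST_NEXT;      /* point at this cap's next field */
        }
        return 0;
    }

    int main(void)
    {
        uint8_t cfg[256] = { 0 };
        cfg[PCI_STATUS] = PCI_STATUS_CAP_LIST;  /* cap list present */
        cfg[PCI_CAPABILITY_LIST] = 0x50;        /* first capability at 0x50 */
        cfg[0x50] = PCI_CAP_ID_MSI;             /* it is the MSI capability */
        cfg[0x51] = 0x00;                       /* and the last one */
        printf("MSI capability found at 0x%02x\n",
               (unsigned)find_cap_in_image(cfg, PCI_CAP_ID_MSI));
        return 0;
    }

Compiled on its own, this reports the capability at 0x50 for the fabricated device.
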
diff -r a0ebceaf41ff -r ad55c06c9bbc tools/ioemu/hw/pass-through.h
--- a/tools/ioemu/hw/pass-through.h     Thu May 01 10:32:10 2008 +0100
+++ b/tools/ioemu/hw/pass-through.h     Thu May 01 10:33:03 2008 +0100
@@ -57,6 +57,14 @@ struct pt_region {
     } access;
 };
 
+struct pt_msi_info {
+    uint32_t flags;
+    int offset;
+    int size;
+    int pvec;   /* physical vector used */
+    int pirq;  /* guest pirq corresponding */
+};
+
 /*
     This structure holds the context of the mapping functions
     and data that is relevant for qemu device management.
@@ -65,6 +73,7 @@ struct pt_dev {
     PCIDevice dev;
     struct pci_dev *pci_dev;                     /* libpci struct */
     struct pt_region bases[PCI_NUM_REGIONS];    /* Access regions */
+    struct pt_msi_info *msi;                    /* MSI virtualization */
 };
 
 /* Used for formatting PCI BDF into cf8 format */
diff -r a0ebceaf41ff -r ad55c06c9bbc tools/ioemu/hw/pt-msi.c
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/tools/ioemu/hw/pt-msi.c   Thu May 01 10:33:03 2008 +0100
@@ -0,0 +1,488 @@
+/*
+ * Copyright (c) 2007, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * Jiang Yunhong <yunhong.jiang@xxxxxxxxx>
+ *
+ * This file implements direct PCI assignment to a HVM guest
+ */
+
+#include "pt-msi.h"
+
+#define PT_MSI_CTRL_WR_MASK_HI      (0x1)
+#define PT_MSI_CTRL_WR_MASK_LO      (0x8E)
+#define PT_MSI_DATA_WR_MASK         (0x38)
+int pt_msi_init(struct pt_dev *dev, int pos)
+{
+    uint8_t id;
+    uint16_t flags;
+    struct pci_dev *pd = dev->pci_dev;
+    PCIDevice *d = (struct PCIDevice *)dev;
+
+    id = pci_read_byte(pd, pos + PCI_CAP_LIST_ID);
+
+    if ( id != PCI_CAP_ID_MSI )
+    {
+        PT_LOG("pt_msi_init: error id %x pos %x\n", id, pos);
+        return -1;
+    }
+
+    dev->msi = malloc(sizeof(struct pt_msi_info));
+    if ( !dev->msi )
+    {
+        PT_LOG("pt_msi_init: error allocation pt_msi_info\n");
+        return -1;
+    }
+    memset(dev->msi, 0, sizeof(struct pt_msi_info));
+
+    dev->msi->offset = pos;
+    dev->msi->size = 0xa;
+
+    flags = pci_read_byte(pd, pos + PCI_MSI_FLAGS);
+    if ( flags & PCI_MSI_FLAGS_ENABLE )
+    {
+        PT_LOG("pt_msi_init: MSI enabled already, disable first\n");
+        pci_write_byte(pd, pos + PCI_MSI_FLAGS, flags & ~PCI_MSI_FLAGS_ENABLE);
+    }
+    dev->msi->flags |= (flags | MSI_FLAG_UNINIT);
+
+    if ( flags & PCI_MSI_FLAGS_64BIT )
+        dev->msi->size += 4;
+    if ( flags & PCI_MSI_FLAGS_PVMASK )
+        dev->msi->size += 10;
+
+    /* All register is 0 after reset, except first 4 byte */
+    *(uint32_t *)(&d->config[pos]) = pci_read_long(pd, pos);
+    d->config[pos + 2] &=  PT_MSI_CTRL_WR_MASK_LO;
+    d->config[pos + 3] &=  PT_MSI_CTRL_WR_MASK_HI;
+
+    return 0;
+}
+
+/*
+ * setup physical msi, but didn't enable it
+ */
+static int pt_msi_setup(struct pt_dev *dev)
+{
+    int vector = -1, pirq = -1;
+
+    if ( !(dev->msi->flags & MSI_FLAG_UNINIT) )
+    {
+        PT_LOG("setup physical after initialized?? \n");
+        return -1;
+    }
+
+    if ( xc_physdev_map_pirq_msi(xc_handle, domid, MAP_PIRQ_TYPE_MSI,
+                            vector, &pirq,
+                                                       dev->pci_dev->dev << 3 | dev->pci_dev->func,
+                                                       dev->pci_dev->bus, 1) )
+    {
+        PT_LOG("error map vector %x\n", vector);
+        return -1;
+    }
+    dev->msi->pirq = pirq;
+    PT_LOG("vector %x pirq %x\n", vector, pirq);
+
+    return 0;
+}
+
+/*
+ * caller should make sure mask is supported
+ */
+static uint32_t get_msi_gmask(struct pt_dev *d)
+{
+    struct PCIDevice *pd = (struct PCIDevice *)d;
+
+    if ( d->msi->flags & PCI_MSI_FLAGS_64BIT )
+        return *(uint32_t *)(pd->config + d->msi->offset + 0xc);
+    else
+        return *(uint32_t *)(pd->config + d->msi->offset + 0x10);
+
+}
+
+static uint16_t get_msi_gdata(struct pt_dev *d)
+{
+    struct PCIDevice *pd = (struct PCIDevice *)d;
+
+    if ( d->msi->flags & PCI_MSI_FLAGS_64BIT )
+        return *(uint16_t *)(pd->config + d->msi->offset + PCI_MSI_DATA_64);
+    else
+        return *(uint16_t *)(pd->config + d->msi->offset + PCI_MSI_DATA_32);
+}
+
+static uint64_t get_msi_gaddr(struct pt_dev *d)
+{
+    struct PCIDevice *pd = (struct PCIDevice *)d;
+    uint32_t addr_hi;
+    uint64_t addr = 0;
+
+    addr =(uint64_t)(*(uint32_t *)(pd->config +
+                     d->msi->offset + PCI_MSI_ADDRESS_LO));
+
+    if ( d->msi->flags & PCI_MSI_FLAGS_64BIT )
+    {
+        addr_hi = *(uint32_t *)(pd->config + d->msi->offset
+                                + PCI_MSI_ADDRESS_HI);
+        addr |= (uint64_t)addr_hi << 32;
+    }
+    return addr;
+}
+
+static uint8_t get_msi_gctrl(struct pt_dev *d)
+{
+    struct PCIDevice *pd = (struct PCIDevice *)d;
+
+    return  *(uint8_t *)(pd->config + d->msi->offset + PCI_MSI_FLAGS);
+}
+
+static uint32_t get_msi_gflags(struct pt_dev *d)
+{
+    uint32_t result = 0;
+    int rh, dm, dest_id, deliv_mode, trig_mode;
+    uint16_t data;
+    uint64_t addr;
+
+    data = get_msi_gdata(d);
+    addr = get_msi_gaddr(d);
+
+    rh = (addr >> MSI_ADDR_REDIRECTION_SHIFT) & 0x1;
+    dm = (addr >> MSI_ADDR_DESTMODE_SHIFT) & 0x1;
+    dest_id = (addr >> MSI_TARGET_CPU_SHIFT) & 0xff;
+    deliv_mode = (data >> MSI_DATA_DELIVERY_SHIFT) & 0x7;
+    trig_mode = (data >> MSI_DATA_TRIGGER_SHIFT) & 0x1;
+
+    result |= dest_id | (rh << GFLAGS_SHIFT_RH) | (dm << GFLAGS_SHIFT_DM) | \
+                (deliv_mode << GLFAGS_SHIFT_DELIV_MODE) |
+                (trig_mode << GLFAGS_SHIFT_TRG_MODE);
+
+    return result;
+}
+
+/*
+ * This may be arch different
+ */
+static inline uint8_t get_msi_gvec(struct pt_dev *d)
+{
+    return get_msi_gdata(d) & 0xff;
+}
+
+static inline uint8_t get_msi_hvec(struct pt_dev *d)
+{
+    struct pci_dev *pd = d->pci_dev;
+    uint16_t data;
+
+    if ( d->msi->flags & PCI_MSI_FLAGS_64BIT )
+        data = pci_read_word(pd, PCI_MSI_DATA_64);
+    else
+        data = pci_read_word(pd, PCI_MSI_DATA_32);
+
+    return data & 0xff;
+}
+
+/*
+ * Update msi mapping, usually called when MSI enabled,
+ * except the first time
+ */
+static int pt_msi_update(struct pt_dev *d)
+{
+    PT_LOG("now update msi with pirq %x gvec %x\n",
+            get_msi_gvec(d), d->msi->pirq);
+    return xc_domain_update_msi_irq(xc_handle, domid, get_msi_gvec(d),
+                                     d->msi->pirq, get_msi_gflags(d));
+}
+
+static int pt_msi_enable(struct pt_dev *d, int enable)
+{
+    uint16_t ctrl;
+    struct pci_dev *pd = d->pci_dev;
+
+    if ( !pd )
+        return -1;
+
+    ctrl = pci_read_word(pd, d->msi->offset + PCI_MSI_FLAGS);
+
+    if ( enable )
+        ctrl |= PCI_MSI_FLAGS_ENABLE;
+    else
+        ctrl &= ~PCI_MSI_FLAGS_ENABLE;
+
+    pci_write_word(pd, d->msi->offset + PCI_MSI_FLAGS, ctrl);
+    return 0;
+}
+
+static int pt_msi_control_update(struct pt_dev *d, uint16_t old_ctrl)
+{
+    uint16_t new_ctrl;
+    PCIDevice *pd = (PCIDevice *)d;
+
+    new_ctrl = get_msi_gctrl(d);
+
+    PT_LOG("old_ctrl %x new_Ctrl %x\n", old_ctrl, new_ctrl);
+
+    if ( new_ctrl & PCI_MSI_FLAGS_ENABLE )
+    {
+        if ( d->msi->flags & MSI_FLAG_UNINIT )
+        {
+            /* Init physical one */
+            PT_LOG("setup msi for dev %x\n", pd->devfn);
+            if ( pt_msi_setup(d) )
+            {
+                PT_LOG("pt_msi_setup error!!!\n");
+                return -1;
+            }
+            pt_msi_update(d);
+
+            d->msi->flags &= ~MSI_FLAG_UNINIT;
+            d->msi->flags |= PT_MSI_MAPPED;
+
+            /* Enable physical MSI only after bind */
+            pt_msi_enable(d, 1);
+        }
+        else if ( !(old_ctrl & PCI_MSI_FLAGS_ENABLE) )
+            pt_msi_enable(d, 1);
+    }
+    else if ( old_ctrl & PCI_MSI_FLAGS_ENABLE )
+        pt_msi_enable(d, 0);
+
+    /* Currently no support for multi-vector */
+    if ( (new_ctrl & PCI_MSI_FLAGS_QSIZE) != 0x0 )
+        PT_LOG("try to set more than 1 vector ctrl %x\n", new_ctrl);
+
+    return 0;
+}
+
+static int
+pt_msi_map_update(struct pt_dev *d, uint32_t old_data, uint64_t old_addr)
+{
+    uint16_t pctrl;
+    uint32_t data;
+    uint64_t addr;
+
+    data = get_msi_gdata(d);
+    addr = get_msi_gaddr(d);
+
+    PT_LOG("old_data %x old_addr %lx data %x addr %lx\n",
+            old_data, old_addr, data, addr);
+
+    if ( data != old_data || addr != old_addr )
+        if ( get_msi_gctrl(d) & PCI_MSI_FLAGS_ENABLE )
+            pt_msi_update(d);
+
+    return 0;
+}
+
+static int pt_msi_mask_update(struct pt_dev *d, uint32_t old_mask)
+{
+    struct pci_dev *pd = d->pci_dev;
+    uint32_t mask;
+    int offset;
+
+    if ( !(d->msi->flags & PCI_MSI_FLAGS_PVMASK) )
+        return -1;
+
+    mask = get_msi_gmask(d);
+
+    if ( d->msi->flags & PCI_MSI_FLAGS_64BIT )
+        offset = d->msi->offset + 0xc;
+    else
+        offset = d->msi->offset + 0x10;
+
+    if ( old_mask != mask )
+        pci_write_long(pd, offset, mask);
+}
+
+#define ACCESSED_DATA 0x2
+#define ACCESSED_MASK 0x4
+#define ACCESSED_ADDR 0x8
+#define ACCESSED_CTRL 0x10
+
+int pt_msi_write(struct pt_dev *d, uint32_t addr, uint32_t val, uint32_t len)
+{
+    struct pci_dev *pd;
+    int i, cur = addr;
+    uint8_t value, flags = 0;
+    uint16_t old_ctrl = 0, old_data = 0;
+    uint32_t old_mask = 0;
+    uint64_t old_addr = 0;
+    PCIDevice *dev = (PCIDevice *)d;
+    int can_write = 1;
+
+    if ( !d || !d->msi )
+        return 0;
+
+    if ( (addr >= (d->msi->offset + d->msi->size) ) ||
+         (addr + len) < d->msi->offset)
+        return 0;
+
+    PT_LOG("addr %x val %x len %x offset %x size %x\n",
+            addr, val, len, d->msi->offset, d->msi->size);
+
+    pd = d->pci_dev;
+    old_ctrl = get_msi_gctrl(d);
+    old_addr = get_msi_gaddr(d);
+    old_data = get_msi_gdata(d);
+
+    if ( d->msi->flags & PCI_MSI_FLAGS_PVMASK )
+        old_mask = get_msi_gmask(d);
+
+    for ( i = 0; i < len; i++, cur++ )
+    {
+        int off;
+        uint8_t orig_value;
+
+        if ( cur < d->msi->offset )
+            continue;
+        else if ( cur >= (d->msi->offset + d->msi->size) )
+            break;
+
+        off = cur - d->msi->offset;
+        value = (val >> (i * 8)) & 0xff;
+
+        switch ( off )
+        {
+            case 0x0 ... 0x1:
+                can_write = 0;
+                break;
+            case 0x2:
+            case 0x3:
+                flags |= ACCESSED_CTRL;
+
+                orig_value = pci_read_byte(pd, d->msi->offset + off);
+
+                orig_value &= (off == 2) ? PT_MSI_CTRL_WR_MASK_LO:
+                                      PT_MSI_CTRL_WR_MASK_HI;
+
+                orig_value |= value & ( (off == 2) ? ~PT_MSI_CTRL_WR_MASK_LO:
+                                              ~PT_MSI_CTRL_WR_MASK_HI);
+                value = orig_value;
+                break;
+            case 0x4 ... 0x7:
+                flags |= ACCESSED_ADDR;
+                /* bit 4 ~ 11 is reserved for MSI in x86 */
+                if ( off == 0x4 )
+                    value &= 0x0f;
+                if ( off == 0x5 )
+                    value &= 0xf0;
+                break;
+            case 0x8 ... 0xb:
+                if ( d->msi->flags & PCI_MSI_FLAGS_64BIT )
+                {
+                    /* Up 32bit is reserved in x86 */
+                    flags |= ACCESSED_ADDR;
+                    if ( value )
+                        PT_LOG("Write up32 addr with %x \n", value);
+                }
+                else
+                {
+                    if ( off == 0xa || off == 0xb )
+                        can_write = 0;
+                    else
+                        flags |= ACCESSED_DATA;
+                    if ( off == 0x9 )
+                        value &= ~PT_MSI_DATA_WR_MASK;
+                }
+                break;
+            case 0xc ... 0xf:
+                if ( d->msi->flags & PCI_MSI_FLAGS_64BIT )
+                {
+                    if ( off == 0xe || off == 0xf )
+                        can_write = 0;
+                    else
+                    {
+                        flags |= ACCESSED_DATA;
+                        if (off == 0xd)
+                            value &= ~PT_MSI_DATA_WR_MASK;
+                    }
+                }
+                else
+                {
+                    if ( d->msi->flags & PCI_MSI_FLAGS_PVMASK )
+                        flags |= ACCESSED_MASK;
+                    else
+                        PT_LOG("why comes to MASK without mask support??\n");
+                }
+                break;
+            case 0x10 ... 0x13:
+                if ( d->msi->flags & PCI_MSI_FLAGS_64BIT )
+                {
+                    if ( d->msi->flags & PCI_MSI_FLAGS_PVMASK )
+                        flags |= ACCESSED_MASK;
+                    else
+                        PT_LOG("why comes to MASK without mask support??\n");
+                }
+                else
+                    can_write = 0;
+                break;
+            case 0x14 ... 0x18:
+                can_write = 0;
+                break;
+            default:
+                PT_LOG("Non MSI register!!!\n");
+                break;
+        }
+
+        if ( can_write )
+            dev->config[cur] = value;
+    }
+
+    if ( flags & ACCESSED_DATA || flags & ACCESSED_ADDR )
+        pt_msi_map_update(d, old_data, old_addr);
+
+    if ( flags & ACCESSED_MASK )
+        pt_msi_mask_update(d, old_mask);
+
+    /* This will enable physical one, do it in last step */
+    if ( flags & ACCESSED_CTRL )
+        pt_msi_control_update(d, old_ctrl);
+
+    return 1;
+}
+
+int pt_msi_read(struct pt_dev *d, int addr, int len, uint32_t *val)
+{
+    int e_addr = addr, e_len = len, offset = 0, i;
+    uint8_t e_val = 0;
+    PCIDevice *pd = (PCIDevice *)d;
+
+    if ( !d || !d->msi )
+        return 0;
+
+    if ( (addr > (d->msi->offset + d->msi->size) ) ||
+         (addr + len) <= d->msi->offset )
+        return 0;
+
+    PT_LOG("pt_msi_read addr %x len %x val %x offset %x size %x\n",
+            addr, len, *val, d->msi->offset, d->msi->size);
+
+    if ( (addr + len ) > (d->msi->offset + d->msi->size) )
+        e_len -= addr + len - d->msi->offset - d->msi->size;
+
+    if ( addr < d->msi->offset )
+    {
+        e_addr = d->msi->offset;
+        offset = d->msi->offset - addr;
+        e_len -= offset;
+    }
+
+    for ( i = 0; i < e_len; i++ )
+    {
+        e_val = *(uint8_t *)(&pd->config[e_addr] + i);
+        *val &= ~(0xff << ( (offset + i) * 8));
+        *val |= (e_val << ( (offset + i) * 8));
+    }
+
+    return e_len;
+}
+
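
pt_msi_write() merges each guest write into the emulated control register byte by byte: bits set in PT_MSI_CTRL_WR_MASK_LO/HI are kept from the physical register, while the remaining bits (MSI enable and multiple-message-enable) are taken from the guest's value. A minimal, self-contained sketch of that merge, using made-up register values, could look like this:

    #include <stdint.h>
    #include <stdio.h>

    /* Same write masks as pt-msi.c: bits set here stay under hardware
     * control (multiple-message-capable field, 64-bit and per-vector-mask
     * capability bits); the cleared bits (enable, multiple-message-enable)
     * come from the guest's write. */
    #define PT_MSI_CTRL_WR_MASK_LO  0x8E
    #define PT_MSI_CTRL_WR_MASK_HI  0x01

    /* Merge one byte of a guest config write with the physical register. */
    static uint8_t merge_ctrl_byte(uint8_t hw_val, uint8_t guest_val, uint8_t mask)
    {
        return (uint8_t)((hw_val & mask) | (guest_val & (uint8_t)~mask));
    }

    int main(void)
    {
        uint8_t hw    = 0x86;  /* 64-bit capable, 8 messages capable, disabled */
        uint8_t guest = 0x01;  /* guest sets only the MSI enable bit */

        printf("merged control low byte: 0x%02x\n",
               (unsigned)merge_ctrl_byte(hw, guest, PT_MSI_CTRL_WR_MASK_LO));
        return 0;
    }
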
diff -r a0ebceaf41ff -r ad55c06c9bbc tools/ioemu/hw/pt-msi.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/tools/ioemu/hw/pt-msi.h   Thu May 01 10:33:03 2008 +0100
@@ -0,0 +1,65 @@
+#ifndef _PT_MSI_H
+#define _PT_MSI_H
+
+#include "vl.h"
+#include "pci/header.h"
+#include "pci/pci.h"
+#include "pass-through.h"
+
+#define MSI_FLAG_UNINIT 0x1000
+#define PT_MSI_MAPPED   0x2000
+
+#define MSI_DATA_VECTOR_SHIFT          0
+#define     MSI_DATA_VECTOR(v)         (((u8)v) << MSI_DATA_VECTOR_SHIFT)
+
+#define MSI_DATA_DELIVERY_SHIFT        8
+#define     MSI_DATA_DELIVERY_FIXED    (0 << MSI_DATA_DELIVERY_SHIFT)
+#define     MSI_DATA_DELIVERY_LOWPRI   (1 << MSI_DATA_DELIVERY_SHIFT)
+
+#define MSI_DATA_LEVEL_SHIFT           14
+#define     MSI_DATA_LEVEL_DEASSERT    (0 << MSI_DATA_LEVEL_SHIFT)
+#define     MSI_DATA_LEVEL_ASSERT      (1 << MSI_DATA_LEVEL_SHIFT)
+
+#define MSI_DATA_TRIGGER_SHIFT         15
+#define     MSI_DATA_TRIGGER_EDGE      (0 << MSI_DATA_TRIGGER_SHIFT)
+#define     MSI_DATA_TRIGGER_LEVEL     (1 << MSI_DATA_TRIGGER_SHIFT)
+
+/*
+ * Shift/mask fields for APIC-based bus address
+ */
+
+#define MSI_ADDR_HEADER                0xfee00000
+#define MSI_TARGET_CPU_SHIFT                  12
+
+#define MSI_ADDR_DESTID_MASK           0xfff0000f
+#define     MSI_ADDR_DESTID_CPU(cpu)   ((cpu) << MSI_TARGET_CPU_SHIFT)
+
+#define MSI_ADDR_DESTMODE_SHIFT        2
+#define     MSI_ADDR_DESTMODE_PHYS     (0 << MSI_ADDR_DESTMODE_SHIFT)
+#define        MSI_ADDR_DESTMODE_LOGIC (1 << MSI_ADDR_DESTMODE_SHIFT)
+
+#define MSI_ADDR_REDIRECTION_SHIFT     3
+#define     MSI_ADDR_REDIRECTION_CPU   (0 << MSI_ADDR_REDIRECTION_SHIFT)
+#define     MSI_ADDR_REDIRECTION_LOWPRI (1 << MSI_ADDR_REDIRECTION_SHIFT)
+
+#define PCI_MSI_FLAGS_PVMASK           0x100
+
+#define AUTO_ASSIGN -1
+
+/* shift count for gflags */
+#define GFLAGS_SHIFT_DEST_ID        0
+#define GFLAGS_SHIFT_RH             8
+#define GFLAGS_SHIFT_DM             9
+#define GLFAGS_SHIFT_DELIV_MODE     12
+#define GLFAGS_SHIFT_TRG_MODE       15
+
+int
+pt_msi_init(struct pt_dev *dev, int pos);
+
+int
+pt_msi_write(struct pt_dev *d, uint32_t addr, uint32_t val, uint32_t len);
+
+int
+pt_msi_read(struct pt_dev *d, int addr, int len, uint32_t *val);
+
+#endif
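
The GFLAGS_* / GLFAGS_* shifts above (the GLFAGS_ spelling is as committed) define how get_msi_gflags() packs the guest-programmed MSI address and data fields into the single gflags word handed to Xen. The standalone sketch below shows that packing with example register values; it is illustrative only and not code from this patch.

    #include <stdint.h>
    #include <stdio.h>

    /* Field positions as used by pt-msi.c/pt-msi.h for the MSI address
     * and data registers and for the packed gflags word. */
    #define MSI_TARGET_CPU_SHIFT        12
    #define MSI_ADDR_DESTMODE_SHIFT     2
    #define MSI_ADDR_REDIRECTION_SHIFT  3
    #define MSI_DATA_DELIVERY_SHIFT     8
    #define MSI_DATA_TRIGGER_SHIFT      15

    #define GFLAGS_SHIFT_RH             8
    #define GFLAGS_SHIFT_DM             9
    #define GLFAGS_SHIFT_DELIV_MODE     12
    #define GLFAGS_SHIFT_TRG_MODE       15

    /* Pack the guest's MSI address/data fields into one gflags word,
     * the way get_msi_gflags() does. */
    static uint32_t compose_gflags(uint64_t addr, uint16_t data)
    {
        uint32_t dest_id = (uint32_t)(addr >> MSI_TARGET_CPU_SHIFT) & 0xff;
        uint32_t rh      = (uint32_t)(addr >> MSI_ADDR_REDIRECTION_SHIFT) & 0x1;
        uint32_t dm      = (uint32_t)(addr >> MSI_ADDR_DESTMODE_SHIFT) & 0x1;
        uint32_t deliv   = (data >> MSI_DATA_DELIVERY_SHIFT) & 0x7;
        uint32_t trig    = (data >> MSI_DATA_TRIGGER_SHIFT) & 0x1;

        return dest_id | (rh << GFLAGS_SHIFT_RH) | (dm << GFLAGS_SHIFT_DM) |
               (deliv << GLFAGS_SHIFT_DELIV_MODE) | (trig << GLFAGS_SHIFT_TRG_MODE);
    }

    int main(void)
    {
        /* 0xfee01000: physical mode, destination APIC ID 1;
         * 0x00d9: vector 0xd9, fixed delivery, edge triggered. */
        printf("gflags = %#x\n",
               (unsigned int)compose_gflags(0xfee01000ULL, 0x00d9));
        return 0;
    }
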
diff -r a0ebceaf41ff -r ad55c06c9bbc tools/libxc/xc_domain.c
--- a/tools/libxc/xc_domain.c   Thu May 01 10:32:10 2008 +0100
+++ b/tools/libxc/xc_domain.c   Thu May 01 10:33:03 2008 +0100
@@ -795,6 +795,32 @@ int xc_deassign_device(
     return do_domctl(xc_handle, &domctl);
 }
 
+int xc_domain_update_msi_irq(
+    int xc_handle,
+    uint32_t domid,
+    uint32_t gvec,
+    uint32_t pirq,
+    uint32_t gflags)
+{
+    int rc;
+    xen_domctl_bind_pt_irq_t *bind;
+
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_bind_pt_irq;
+    domctl.domain = (domid_t)domid;
+
+    bind = &(domctl.u.bind_pt_irq);
+    bind->hvm_domid = domid;
+    bind->irq_type = PT_IRQ_TYPE_MSI;
+    bind->machine_irq = pirq;
+    bind->u.msi.gvec = gvec;
+    bind->u.msi.gflags = gflags;
+
+    rc = do_domctl(xc_handle, &domctl);
+    return rc;
+}
+
 /* Pass-through: binds machine irq to guests irq */
 int xc_domain_bind_pt_irq(
     int xc_handle,
diff -r a0ebceaf41ff -r ad55c06c9bbc tools/libxc/xc_physdev.c
--- a/tools/libxc/xc_physdev.c  Thu May 01 10:32:10 2008 +0100
+++ b/tools/libxc/xc_physdev.c  Thu May 01 10:33:03 2008 +0100
@@ -45,6 +45,37 @@ int xc_physdev_map_pirq(int xc_handle,
     return rc;
 }
 
+int xc_physdev_map_pirq_msi(int xc_handle,
+                            int domid,
+                            int type,
+                            int index,
+                            int *pirq,
+                            int devfn,
+                            int bus,
+                            int msi_type)
+{
+    int rc;
+    struct physdev_map_pirq map;
+
+    if ( !pirq )
+        return -EINVAL;
+
+    map.domid = domid;
+    map.type = type;
+    map.index = index;
+    map.pirq = *pirq;
+    map.msi_info.devfn = devfn;
+    map.msi_info.bus = bus;
+    map.msi_info.msi = msi_type;
+
+    rc = do_physdev_op(xc_handle, PHYSDEVOP_map_pirq, &map);
+
+    if ( !rc )
+        *pirq = map.pirq;
+
+    return rc;
+}
+
 int xc_physdev_unmap_pirq(int xc_handle,
                           int domid,
                           int pirq)
@@ -59,3 +90,4 @@ int xc_physdev_unmap_pirq(int xc_handle,
 
     return rc;
 }
+
diff -r a0ebceaf41ff -r ad55c06c9bbc tools/libxc/xenctrl.h
--- a/tools/libxc/xenctrl.h     Thu May 01 10:32:10 2008 +0100
+++ b/tools/libxc/xenctrl.h     Thu May 01 10:33:03 2008 +0100
@@ -856,6 +856,15 @@ int xc_physdev_map_pirq(int xc_handle,
                         int index,
                         int *pirq);
 
+int xc_physdev_map_pirq_msi(int xc_handle,
+                            int domid,
+                            int type,
+                            int index,
+                            int *pirq,
+                            int devfn,
+                            int bus,
+                            int msi_type);
+
 int xc_physdev_unmap_pirq(int xc_handle,
                           int domid,
                           int pirq);
@@ -959,6 +968,13 @@ int xc_domain_ioport_mapping(int xc_hand
                              uint32_t first_mport,
                              uint32_t nr_ports,
                              uint32_t add_mapping);
+
+int xc_domain_update_msi_irq(
+    int xc_handle,
+    uint32_t domid,
+    uint32_t gvec,
+    uint32_t pirq,
+    uint32_t gflags);
 
 int xc_domain_bind_pt_irq(int xc_handle,
                           uint32_t domid,
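
For context, qemu's pt_msi_setup() and pt_msi_update() use these two new libxc calls back to back: first map the device's MSI onto a pirq, then bind that pirq to the guest vector and flags. The fragment below is only a sketch of that sequence against the interface declared above; bind_guest_msi() and its parameters are hypothetical, and MAP_PIRQ_TYPE_MSI is assumed to be available from Xen's public physdev header.

    #include <stdint.h>
    #include <stdio.h>
    #include <xenctrl.h>
    #include <xen/physdev.h>    /* MAP_PIRQ_TYPE_MSI (assumed install path) */

    /* Hypothetical helper in the spirit of pt_msi_setup() + pt_msi_update(). */
    static int bind_guest_msi(int xc_handle, uint32_t domid,
                              int bus, int devfn,
                              uint32_t gvec, uint32_t gflags)
    {
        int pirq = -1;

        /* Ask Xen to allocate a pirq for this device's MSI; the index/vector
         * argument is -1 so the hypervisor picks one, as pt_msi_setup() does. */
        if ( xc_physdev_map_pirq_msi(xc_handle, domid, MAP_PIRQ_TYPE_MSI,
                                     -1, &pirq, devfn, bus, 1) )
            return -1;

        /* Route the pirq to the guest vector with the packed gflags word. */
        if ( xc_domain_update_msi_irq(xc_handle, domid, gvec, pirq, gflags) )
            return -1;

        fprintf(stderr, "bound MSI: pirq %d -> gvec %#x\n",
                pirq, (unsigned int)gvec);
        return 0;
    }
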
diff -r a0ebceaf41ff -r ad55c06c9bbc xen/arch/x86/hvm/Makefile
--- a/xen/arch/x86/hvm/Makefile Thu May 01 10:32:10 2008 +0100
+++ b/xen/arch/x86/hvm/Makefile Thu May 01 10:33:03 2008 +0100
@@ -16,4 +16,5 @@ obj-y += vlapic.o
 obj-y += vlapic.o
 obj-y += vpic.o
 obj-y += save.o
+obj-y += vmsi.o
 obj-y += stdvga.o
diff -r a0ebceaf41ff -r ad55c06c9bbc xen/arch/x86/hvm/vlapic.c
--- a/xen/arch/x86/hvm/vlapic.c Thu May 01 10:32:10 2008 +0100
+++ b/xen/arch/x86/hvm/vlapic.c Thu May 01 10:33:03 2008 +0100
@@ -476,6 +476,9 @@ void vlapic_EOI_set(struct vlapic *vlapi
 
     if ( vlapic_test_and_clear_vector(vector, &vlapic->regs->data[APIC_TMR]) )
         vioapic_update_EOI(vlapic_domain(vlapic), vector);
+       
+    if ( vtd_enabled )
+        hvm_dpci_msi_eoi(current->domain, vector);
 }
 
 static int vlapic_ipi(
diff -r a0ebceaf41ff -r ad55c06c9bbc xen/arch/x86/hvm/vmsi.c
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/x86/hvm/vmsi.c   Thu May 01 10:33:03 2008 +0100
@@ -0,0 +1,189 @@
+/*
+ *  Copyright (C) 2001  MandrakeSoft S.A.
+ *
+ *    MandrakeSoft S.A.
+ *    43, rue d'Aboukir
+ *    75002 Paris - France
+ *    http://www.linux-mandrake.com/
+ *    http://www.mandrakesoft.com/
+ *
+ *  This library is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU Lesser General Public
+ *  License as published by the Free Software Foundation; either
+ *  version 2 of the License, or (at your option) any later version.
+ *
+ *  This library is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  Lesser General Public License for more details.
+ *
+ *  You should have received a copy of the GNU Lesser General Public
+ *  License along with this library; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ *
+ * Support for virtual MSI logic
+ * Will be merged it with virtual IOAPIC logic, since most is the same
+*/
+
+#include <xen/config.h>
+#include <xen/types.h>
+#include <xen/mm.h>
+#include <xen/xmalloc.h>
+#include <xen/lib.h>
+#include <xen/errno.h>
+#include <xen/sched.h>
+#include <public/hvm/ioreq.h>
+#include <asm/hvm/io.h>
+#include <asm/hvm/vpic.h>
+#include <asm/hvm/vlapic.h>
+#include <asm/hvm/support.h>
+#include <asm/current.h>
+#include <asm/event.h>
+
+static uint32_t vmsi_get_delivery_bitmask(
+    struct domain *d, uint16_t dest, uint8_t dest_mode)
+{
+    uint32_t mask = 0;
+    struct vcpu *v;
+
+    HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "ioapic_get_delivery_bitmask "
+                "dest %d dest_mode %d\n", dest, dest_mode);
+
+    if ( dest_mode == 0 ) /* Physical mode. */
+    {
+        if ( dest == 0xFF ) /* Broadcast. */
+        {
+            for_each_vcpu ( d, v )
+                mask |= 1 << v->vcpu_id;
+            goto out;
+        }
+
+        for_each_vcpu ( d, v )
+        {
+            if ( VLAPIC_ID(vcpu_vlapic(v)) == dest )
+            {
+                mask = 1 << v->vcpu_id;
+                break;
+            }
+        }
+    }
+    else if ( dest != 0 ) /* Logical mode, MDA non-zero. */
+    {
+        for_each_vcpu ( d, v )
+            if ( vlapic_match_logical_addr(vcpu_vlapic(v), dest) )
+                mask |= 1 << v->vcpu_id;
+    }
+
+ out:
+    HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "ioapic_get_delivery_bitmask mask %x\n",
+                mask);
+    return mask;
+}
+
+static void vmsi_inj_irq(
+    struct domain *d,
+    struct vlapic *target,
+    uint8_t vector,
+    uint8_t trig_mode,
+    uint8_t delivery_mode)
+{
+    HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "ioapic_inj_irq "
+                "irq %d trig %d delive mode %d\n",
+                vector, trig_mode, delivery_mode);
+
+    switch ( delivery_mode )
+    {
+    case dest_Fixed:
+    case dest_LowestPrio:
+        if ( vlapic_set_irq(target, vector, trig_mode) )
+            vcpu_kick(vlapic_vcpu(target));
+        break;
+    default:
+        gdprintk(XENLOG_WARNING, "error delivery mode %d\n", delivery_mode);
+        break;
+    }
+}
+
+#define VMSI_DEST_ID_MASK 0xff
+#define VMSI_RH_MASK      0x100
+#define VMSI_DM_MASK      0x200
+#define VMSI_DELIV_MASK   0x7000
+#define VMSI_TRIG_MODE    0x8000
+
+int vmsi_deliver(struct domain *d, int pirq)
+{
+    struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
+    uint32_t flags = hvm_irq_dpci->mirq[pirq].gmsi.gflags;
+    int vector = hvm_irq_dpci->mirq[pirq].gmsi.gvec;
+    uint16_t dest = flags & VMSI_DEST_ID_MASK;
+    uint8_t dest_mode = flags & VMSI_DM_MASK;
+    uint8_t delivery_mode = flags & VMSI_DELIV_MASK;
+    uint8_t trig_mode = flags & VMSI_TRIG_MODE;
+    uint32_t deliver_bitmask;
+    struct vlapic *target;
+    struct vcpu *v;
+
+    HVM_DBG_LOG(DBG_LEVEL_IOAPIC,
+                "msi: dest=%x dest_mode=%x delivery_mode=%x "
+                "vector=%x trig_mode=%x\n",
+                dest, dest_mode, delivery_mode, vector, trig_mode);
+
+    if ( !(hvm_irq_dpci->mirq[pirq].flags & HVM_IRQ_DPCI_MSI) )
+    {
+        gdprintk(XENLOG_WARNING, "pirq %x not msi \n", pirq);
+        return 0;
+    }
+
+    deliver_bitmask = vmsi_get_delivery_bitmask(d, dest, dest_mode);
+    if ( !deliver_bitmask )
+    {
+        HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "ioapic deliver "
+                    "no target on destination\n");
+        return 0;
+    }
+
+    switch ( delivery_mode )
+    {
+    case dest_LowestPrio:
+    {
+        target = apic_round_robin(d, vector, deliver_bitmask);
+        if ( target != NULL )
+            vmsi_inj_irq(d, target, vector, trig_mode, delivery_mode);
+        else
+            HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "null round robin: "
+                        "mask=%x vector=%x delivery_mode=%x\n",
+                        deliver_bitmask, vector, dest_LowestPrio);
+        break;
+    }
+
+    case dest_Fixed:
+    case dest_ExtINT:
+    {
+        uint8_t bit;
+        for ( bit = 0; deliver_bitmask != 0; bit++ )
+        {
+            if ( !(deliver_bitmask & (1 << bit)) )
+                continue;
+            deliver_bitmask &= ~(1 << bit);
+            v = d->vcpu[bit];
+            if ( v != NULL )
+            {
+                target = vcpu_vlapic(v);
+                vmsi_inj_irq(d, target, vector, trig_mode, delivery_mode);
+            }
+        }
+        break;
+    }
+
+    case dest_SMI:
+    case dest_NMI:
+    case dest_INIT:
+    case dest__reserved_2:
+    default:
+        gdprintk(XENLOG_WARNING, "Unsupported delivery mode %d\n",
+                 delivery_mode);
+        break;
+    }
+    return 1;
+}
+
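
vmsi_deliver() reads the destination, destination mode, delivery mode and trigger mode straight out of the gflags word stored at bind time, using the VMSI_* masks defined above. The self-contained sketch below unpacks the same layout with explicit shifts; the normalisation of the single-bit fields to 0/1 is mine, not the patch's.

    #include <stdint.h>
    #include <stdio.h>

    /* Same field layout as the VMSI_* masks in vmsi.c (and the GFLAGS_*
     * shifts on the qemu side). */
    #define VMSI_DEST_ID_MASK 0x00ff
    #define VMSI_RH_MASK      0x0100
    #define VMSI_DM_MASK      0x0200
    #define VMSI_DELIV_MASK   0x7000
    #define VMSI_TRIG_MODE    0x8000

    /* Unpack a gflags word into the fields vmsi_deliver() cares about. */
    static void decode_gflags(uint32_t flags)
    {
        unsigned int dest      = flags & VMSI_DEST_ID_MASK;
        unsigned int redir     = !!(flags & VMSI_RH_MASK);
        unsigned int dest_mode = !!(flags & VMSI_DM_MASK);   /* 0=phys, 1=logical */
        unsigned int delivery  = (flags & VMSI_DELIV_MASK) >> 12;
        unsigned int trig_mode = !!(flags & VMSI_TRIG_MODE); /* 0=edge, 1=level */

        printf("dest=%u rh=%u dest_mode=%u delivery=%u trig=%u\n",
               dest, redir, dest_mode, delivery, trig_mode);
    }

    int main(void)
    {
        decode_gflags(0x0001);   /* physical destination, APIC ID 1, fixed, edge */
        return 0;
    }
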
diff -r a0ebceaf41ff -r ad55c06c9bbc xen/arch/x86/hvm/vmx/intr.c
--- a/xen/arch/x86/hvm/vmx/intr.c       Thu May 01 10:32:10 2008 +0100
+++ b/xen/arch/x86/hvm/vmx/intr.c       Thu May 01 10:33:03 2008 +0100
@@ -103,6 +103,12 @@ static void enable_intr_window(struct vc
     }
 }
 
+extern int vmsi_deliver(struct domain *d, int pirq);
+static int hvm_pci_msi_assert(struct domain *d, int pirq)
+{
+    return vmsi_deliver(d, pirq);
+}
+
 static void vmx_dirq_assist(struct vcpu *v)
 {
     unsigned int irq;
@@ -120,6 +126,12 @@ static void vmx_dirq_assist(struct vcpu 
     {
         if ( !test_and_clear_bit(irq, &hvm_irq_dpci->dirq_mask) )
             continue;
+
+               if ( test_bit(_HVM_IRQ_DPCI_MSI, &hvm_irq_dpci->mirq[irq].flags) )
+               {
+                       hvm_pci_msi_assert(d, irq);
+                       continue;
+               }
 
         stop_timer(&hvm_irq_dpci->hvm_timer[domain_irq_to_vector(d, irq)]);
 
diff -r a0ebceaf41ff -r ad55c06c9bbc xen/drivers/passthrough/io.c
--- a/xen/drivers/passthrough/io.c      Thu May 01 10:32:10 2008 +0100
+++ b/xen/drivers/passthrough/io.c      Thu May 01 10:33:03 2008 +0100
@@ -71,44 +71,59 @@ int pt_irq_create_bind_vtd(
             xfree(hvm_irq_dpci);
     }
 
-    machine_gsi = pt_irq_bind->machine_irq;
-    device = pt_irq_bind->u.pci.device;
-    intx = pt_irq_bind->u.pci.intx;
-    guest_gsi = hvm_pci_intx_gsi(device, intx);
-    link = hvm_pci_intx_link(device, intx);
-    hvm_irq_dpci->link_cnt[link]++;
-
-    digl = xmalloc(struct dev_intx_gsi_link);
-    if ( !digl )
-        return -ENOMEM;
-
-    digl->device = device;
-    digl->intx = intx;
-    digl->gsi = guest_gsi;
-    digl->link = link;
-    list_add_tail(&digl->list,
-                  &hvm_irq_dpci->mirq[machine_gsi].digl_list);
-
-    hvm_irq_dpci->girq[guest_gsi].valid = 1;
-    hvm_irq_dpci->girq[guest_gsi].device = device;
-    hvm_irq_dpci->girq[guest_gsi].intx = intx;
-    hvm_irq_dpci->girq[guest_gsi].machine_gsi = machine_gsi;
-
-    /* Bind the same mirq once in the same domain */
-    if ( !hvm_irq_dpci->mirq[machine_gsi].valid )
-    {
-        hvm_irq_dpci->mirq[machine_gsi].valid = 1;
-        hvm_irq_dpci->mirq[machine_gsi].dom = d;
-
-        init_timer(&hvm_irq_dpci->hvm_timer[domain_irq_to_vector(d, machine_gsi)],
-                   pt_irq_time_out, &hvm_irq_dpci->mirq[machine_gsi], 0);
-        /* Deal with gsi for legacy devices */
-        pirq_guest_bind(d->vcpu[0], machine_gsi, BIND_PIRQ__WILL_SHARE);
-    }
-
-    gdprintk(XENLOG_INFO VTDPREFIX,
-             "VT-d irq bind: m_irq = %x device = %x intx = %x\n",
-             machine_gsi, device, intx);
+    if ( pt_irq_bind->irq_type == PT_IRQ_TYPE_MSI )
+    {
+        int pirq = pt_irq_bind->machine_irq;
+
+        hvm_irq_dpci->mirq[pirq].flags |= HVM_IRQ_DPCI_VALID | HVM_IRQ_DPCI_MSI;
+        hvm_irq_dpci->mirq[pirq].gmsi.gvec = pt_irq_bind->u.msi.gvec;
+        hvm_irq_dpci->mirq[pirq].gmsi.gflags = pt_irq_bind->u.msi.gflags;
+
+        hvm_irq_dpci->msi_gvec_pirq[pt_irq_bind->u.msi.gvec] = pirq;
+
+        pirq_guest_bind(d->vcpu[0], pirq, BIND_PIRQ__WILL_SHARE);
+    }
+    else
+    {
+        machine_gsi = pt_irq_bind->machine_irq;
+        device = pt_irq_bind->u.pci.device;
+        intx = pt_irq_bind->u.pci.intx;
+        guest_gsi = hvm_pci_intx_gsi(device, intx);
+        link = hvm_pci_intx_link(device, intx);
+        hvm_irq_dpci->link_cnt[link]++;
+
+        digl = xmalloc(struct dev_intx_gsi_link);
+        if ( !digl )
+            return -ENOMEM;
+
+        digl->device = device;
+        digl->intx = intx;
+        digl->gsi = guest_gsi;
+        digl->link = link;
+        list_add_tail(&digl->list,
+                      &hvm_irq_dpci->mirq[machine_gsi].digl_list);
+
+        hvm_irq_dpci->girq[guest_gsi].valid = 1;
+        hvm_irq_dpci->girq[guest_gsi].device = device;
+        hvm_irq_dpci->girq[guest_gsi].intx = intx;
+        hvm_irq_dpci->girq[guest_gsi].machine_gsi = machine_gsi;
+
+        /* Bind the same mirq once in the same domain */
+        if ( !(hvm_irq_dpci->mirq[machine_gsi].flags & HVM_IRQ_DPCI_VALID) )
+        {
+            hvm_irq_dpci->mirq[machine_gsi].flags |= HVM_IRQ_DPCI_VALID;
+            hvm_irq_dpci->mirq[machine_gsi].dom = d;
+
+            init_timer(&hvm_irq_dpci->hvm_timer[domain_irq_to_vector(d, machine_gsi)],
+                       pt_irq_time_out, &hvm_irq_dpci->mirq[machine_gsi], 0);
+            /* Deal with gsi for legacy devices */
+            pirq_guest_bind(d->vcpu[0], machine_gsi, BIND_PIRQ__WILL_SHARE);
+        }
+
+        gdprintk(XENLOG_INFO VTDPREFIX,
+                 "VT-d irq bind: m_irq = %x device = %x intx = %x\n",
+                 machine_gsi, device, intx);
+    }
     return 0;
 }
 
@@ -139,7 +154,7 @@ int pt_irq_destroy_bind_vtd(
            sizeof(struct hvm_girq_dpci_mapping));
 
     /* clear the mirq info */
-    if ( hvm_irq_dpci->mirq[machine_gsi].valid )
+    if ( (hvm_irq_dpci->mirq[machine_gsi].flags & HVM_IRQ_DPCI_VALID) )
     {
         list_for_each_safe ( digl_list, tmp,
                 &hvm_irq_dpci->mirq[machine_gsi].digl_list )
@@ -161,7 +176,7 @@ int pt_irq_destroy_bind_vtd(
             pirq_guest_unbind(d, machine_gsi);
             kill_timer(&hvm_irq_dpci->hvm_timer[domain_irq_to_vector(d, machine_gsi)]);
             hvm_irq_dpci->mirq[machine_gsi].dom   = NULL;
-            hvm_irq_dpci->mirq[machine_gsi].valid = 0;
+            hvm_irq_dpci->mirq[machine_gsi].flags = 0;
         }
     }
 
@@ -177,7 +192,7 @@ int hvm_do_IRQ_dpci(struct domain *d, un
     struct hvm_irq_dpci *dpci = domain_get_irq_dpci(d);
 
     if ( !iommu_enabled || (d == dom0) || !dpci ||
-         !dpci->mirq[mirq].valid )
+         !dpci->mirq[mirq].flags & HVM_IRQ_DPCI_VALID )
         return 0;
 
     /*
@@ -187,11 +202,28 @@ int hvm_do_IRQ_dpci(struct domain *d, un
      * PIC) and we need to detect that.
      */
     set_bit(mirq, dpci->dirq_mask);
-    set_timer(&dpci->hvm_timer[domain_irq_to_vector(d, mirq)],
-              NOW() + PT_IRQ_TIME_OUT);
+       if ( !test_bit(_HVM_IRQ_DPCI_MSI, &dpci->mirq[mirq].flags) )
+               set_timer(&dpci->hvm_timer[domain_irq_to_vector(d, mirq)],
+                                 NOW() + PT_IRQ_TIME_OUT);
     vcpu_kick(d->vcpu[0]);
 
     return 1;
+}
+
+
+void hvm_dpci_msi_eoi(struct domain *d, int vector)
+{
+    struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
+    int pirq;
+
+    if ( !vtd_enabled || (hvm_irq_dpci == NULL) )
+       return;
+
+    pirq = hvm_irq_dpci->msi_gvec_pirq[vector];
+    if ( ( pirq >= 0 ) && (pirq < NR_PIRQS) &&
+         (hvm_irq_dpci->mirq[pirq].flags & HVM_IRQ_DPCI_VALID) &&
+         (hvm_irq_dpci->mirq[pirq].flags & HVM_IRQ_DPCI_MSI) )
+         pirq_guest_eoi(d, pirq);
 }
 
 void hvm_dpci_eoi(struct domain *d, unsigned int guest_gsi,
diff -r a0ebceaf41ff -r ad55c06c9bbc xen/drivers/passthrough/iommu.c
--- a/xen/drivers/passthrough/iommu.c   Thu May 01 10:32:10 2008 +0100
+++ b/xen/drivers/passthrough/iommu.c   Thu May 01 10:33:03 2008 +0100
@@ -77,7 +77,7 @@ void iommu_domain_destroy(struct domain 
     {
         for ( i = 0; i < NR_IRQS; i++ )
         {
-            if ( !hvm_irq_dpci->mirq[i].valid )
+            if ( !(hvm_irq_dpci->mirq[i].flags & HVM_IRQ_DPCI_VALID) )
                 continue;
 
             pirq_guest_unbind(d, i);
diff -r a0ebceaf41ff -r ad55c06c9bbc xen/drivers/passthrough/vtd/x86/vtd.c
--- a/xen/drivers/passthrough/vtd/x86/vtd.c     Thu May 01 10:32:10 2008 +0100
+++ b/xen/drivers/passthrough/vtd/x86/vtd.c     Thu May 01 10:33:03 2008 +0100
@@ -101,7 +101,7 @@ void hvm_dpci_isairq_eoi(struct domain *
     /* Multiple mirq may be mapped to one isa irq */
     for ( i = 0; i < NR_IRQS; i++ )
     {
-        if ( !dpci->mirq[i].valid )
+        if ( !dpci->mirq[i].flags & HVM_IRQ_DPCI_VALID )
             continue;
 
         list_for_each_entry_safe ( digl, tmp,
diff -r a0ebceaf41ff -r ad55c06c9bbc xen/include/asm-x86/hvm/io.h
--- a/xen/include/asm-x86/hvm/io.h      Thu May 01 10:32:10 2008 +0100
+++ b/xen/include/asm-x86/hvm/io.h      Thu May 01 10:33:03 2008 +0100
@@ -120,5 +120,6 @@ void stdvga_init(struct domain *d);
 void stdvga_init(struct domain *d);
 void stdvga_deinit(struct domain *d);
 
+extern void hvm_dpci_msi_eoi(struct domain *d, int vector);
 #endif /* __ASM_X86_HVM_IO_H__ */
 
diff -r a0ebceaf41ff -r ad55c06c9bbc xen/include/asm-x86/hvm/irq.h
--- a/xen/include/asm-x86/hvm/irq.h     Thu May 01 10:32:10 2008 +0100
+++ b/xen/include/asm-x86/hvm/irq.h     Thu May 01 10:33:03 2008 +0100
@@ -38,11 +38,21 @@ struct dev_intx_gsi_link {
     uint8_t link;
 };
 
+#define HVM_IRQ_DPCI_VALID 0x1
+#define HVM_IRQ_DPCI_MSI   0x2
+#define _HVM_IRQ_DPCI_MSI  0x1
+
+struct hvm_gmsi_info {
+    uint32_t gvec;
+    uint32_t gflags;
+};
+
 struct hvm_mirq_dpci_mapping {
-    uint8_t valid;
+    uint32_t flags;
     int pending;
     struct list_head digl_list;
     struct domain *dom;
+    struct hvm_gmsi_info gmsi;
 };
 
 struct hvm_girq_dpci_mapping {
@@ -60,6 +70,7 @@ struct hvm_irq_dpci {
     struct hvm_mirq_dpci_mapping mirq[NR_IRQS];
     /* Guest IRQ to guest device/intx mapping. */
     struct hvm_girq_dpci_mapping girq[NR_IRQS];
+    uint8_t msi_gvec_pirq[NR_VECTORS];
     DECLARE_BITMAP(dirq_mask, NR_IRQS);
     /* Record of mapped ISA IRQs */
     DECLARE_BITMAP(isairq_map, NR_ISAIRQS);
diff -r a0ebceaf41ff -r ad55c06c9bbc xen/include/public/domctl.h
--- a/xen/include/public/domctl.h       Thu May 01 10:32:10 2008 +0100
+++ b/xen/include/public/domctl.h       Thu May 01 10:33:03 2008 +0100
@@ -454,7 +454,8 @@ DEFINE_XEN_GUEST_HANDLE(xen_domctl_assig
 #define XEN_DOMCTL_unbind_pt_irq     48
 typedef enum pt_irq_type_e {
     PT_IRQ_TYPE_PCI,
-    PT_IRQ_TYPE_ISA
+    PT_IRQ_TYPE_ISA,
+    PT_IRQ_TYPE_MSI,
 } pt_irq_type_t;
 struct xen_domctl_bind_pt_irq {
     uint32_t machine_irq;
@@ -470,6 +471,10 @@ struct xen_domctl_bind_pt_irq {
             uint8_t device;
             uint8_t intx;
         } pci;
+        struct {
+            uint8_t gvec;
+            uint32_t gflags;
+        } msi;
     } u;
 };
 typedef struct xen_domctl_bind_pt_irq xen_domctl_bind_pt_irq_t;
