
Re: [Xen-devel] [PATCH v3 1/3] IOMMU: allow MSI message to IRTE propagation to fail



Acked, Thanks!
Xiantao

> -----Original Message-----
> From: Jan Beulich [mailto:JBeulich@xxxxxxxx]
> Sent: Friday, April 12, 2013 6:23 PM
> To: xen-devel; Jan Beulich
> Cc: Jacob Shin; suravee.suthikulpanit@xxxxxxx; Zhang, Xiantao
> Subject: [PATCH v3 1/3] IOMMU: allow MSI message to IRTE propagation to fail
> 
> With the need to allocate multiple contiguous IRTEs for multi-vector
> MSI, the chance of failure here increases. While the AMD side currently
> does not allocate IRTEs at all (and hence this allocation cannot fail,
> which is going to change with a later patch in this series), the VT-d
> side can already fail here but silently ignores the error; this patch
> fixes that.
> 
> Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
> ---
> v3: Introduce _find_iommu_for_device() to take care of filtering out
>     the case where an MSI is being set up for the IOMMU itself.
> 
> --- a/xen/arch/x86/hpet.c
> +++ b/xen/arch/x86/hpet.c
> @@ -254,13 +254,22 @@ static void hpet_msi_mask(struct irq_des
>      ch->msi.msi_attrib.masked = 1;
>  }
> 
> -static void hpet_msi_write(struct hpet_event_channel *ch, struct msi_msg *msg)
> +static int hpet_msi_write(struct hpet_event_channel *ch, struct msi_msg *msg)
>  {
>      ch->msi.msg = *msg;
> +
>      if ( iommu_intremap )
> -        iommu_update_ire_from_msi(&ch->msi, msg);
> +    {
> +        int rc = iommu_update_ire_from_msi(&ch->msi, msg);
> +
> +        if ( rc )
> +            return rc;
> +    }
> +
>      hpet_write32(msg->data, HPET_Tn_ROUTE(ch->idx));
>      hpet_write32(msg->address_lo, HPET_Tn_ROUTE(ch->idx) + 4);
> +
> +    return 0;
>  }
> 
>  static void __maybe_unused
> @@ -318,12 +327,12 @@ static hw_irq_controller hpet_msi_type =
>      .set_affinity   = hpet_msi_set_affinity,
>  };
> 
> -static void __hpet_setup_msi_irq(struct irq_desc *desc)
> +static int __hpet_setup_msi_irq(struct irq_desc *desc)
>  {
>      struct msi_msg msg;
> 
>      msi_compose_msg(desc, &msg);
> -    hpet_msi_write(desc->action->dev_id, &msg);
> +    return hpet_msi_write(desc->action->dev_id, &msg);
>  }
> 
>  static int __init hpet_setup_msi_irq(struct hpet_event_channel *ch)
> @@ -347,6 +356,8 @@ static int __init hpet_setup_msi_irq(str
> 
>      desc->handler = &hpet_msi_type;
>      ret = request_irq(ch->msi.irq, hpet_interrupt_handler, 0, "HPET", ch);
> +    if ( ret >= 0 )
> +        ret = __hpet_setup_msi_irq(desc);
>      if ( ret < 0 )
>      {
>          if ( iommu_intremap )
> @@ -354,7 +365,6 @@ static int __init hpet_setup_msi_irq(str
>          return ret;
>      }
> 
> -    __hpet_setup_msi_irq(desc);
>      desc->msi_desc = &ch->msi;
> 
>      return 0;
> --- a/xen/arch/x86/irq.c
> +++ b/xen/arch/x86/irq.c
> @@ -1938,7 +1938,14 @@ int map_domain_pirq(
>          if ( desc->handler != &no_irq_type )
>              dprintk(XENLOG_G_ERR, "dom%d: irq %d in use\n",
>                      d->domain_id, irq);
> -        setup_msi_handler(desc, msi_desc);
> +
> +        ret = setup_msi_irq(desc, msi_desc);
> +        if ( ret )
> +        {
> +            spin_unlock_irqrestore(&desc->lock, flags);
> +            pci_disable_msi(msi_desc);
> +            goto done;
> +        }
> 
>          if ( opt_irq_vector_map == OPT_IRQ_VECTOR_MAP_PERDEV
>               && !desc->arch.used_vectors )
> @@ -1954,7 +1961,6 @@ int map_domain_pirq(
>          }
> 
>          set_domain_irq_pirq(d, irq, info);
> -        setup_msi_irq(desc);
>          spin_unlock_irqrestore(&desc->lock, flags);
>      }
>      else
> --- a/xen/arch/x86/msi.c
> +++ b/xen/arch/x86/msi.c
> @@ -214,14 +214,18 @@ static void read_msi_msg(struct msi_desc
>          iommu_read_msi_from_ire(entry, msg);
>  }
> 
> -static void write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
> +static int write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
>  {
>      entry->msg = *msg;
> 
>      if ( iommu_intremap )
>      {
> +        int rc;
> +
>          ASSERT(msg != &entry->msg);
> -        iommu_update_ire_from_msi(entry, msg);
> +        rc = iommu_update_ire_from_msi(entry, msg);
> +        if ( rc )
> +            return rc;
>      }
> 
>      switch ( entry->msi_attrib.type )
> @@ -264,6 +268,8 @@ static void write_msi_msg(struct msi_des
>      default:
>          BUG();
>      }
> +
> +    return 0;
>  }
> 
>  void set_msi_affinity(struct irq_desc *desc, const cpumask_t *mask)
> @@ -464,19 +470,15 @@ static struct msi_desc* alloc_msi_entry(
>      return entry;
>  }
> 
> -void setup_msi_handler(struct irq_desc *desc, struct msi_desc *msidesc)
> +int setup_msi_irq(struct irq_desc *desc, struct msi_desc *msidesc)
>  {
> +    struct msi_msg msg;
> +
>      desc->msi_desc = msidesc;
>      desc->handler = msi_maskable_irq(msidesc) ? &pci_msi_maskable
>                                                : &pci_msi_nonmaskable;
> -}
> -
> -void setup_msi_irq(struct irq_desc *desc)
> -{
> -    struct msi_msg msg;
> -
>      msi_compose_msg(desc, &msg);
> -    write_msi_msg(desc->msi_desc, &msg);
> +    return write_msi_msg(msidesc, &msg);
>  }
> 
>  int msi_free_irq(struct msi_desc *entry)
> --- a/xen/drivers/passthrough/amd/iommu_intr.c
> +++ b/xen/drivers/passthrough/amd/iommu_intr.c
> @@ -17,6 +17,7 @@
>   * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
>   */
> 
> +#include <xen/err.h>
>  #include <xen/sched.h>
>  #include <xen/hvm/iommu.h>
>  #include <asm/amd-iommu.h>
> @@ -359,25 +360,35 @@ done:
>      }
>  }
> 
> -void amd_iommu_msi_msg_update_ire(
> +static struct amd_iommu *_find_iommu_for_device(int seg, int bdf)
> +{
> +    struct amd_iommu *iommu = find_iommu_for_device(seg, bdf);
> +
> +    if ( iommu )
> +        return iommu;
> +
> +    list_for_each_entry ( iommu, &amd_iommu_head, list )
> +        if ( iommu->seg == seg && iommu->bdf == bdf )
> +            return NULL;
> +
> +    AMD_IOMMU_DEBUG("No IOMMU for MSI dev = %04x:%02x:%02x.%u\n",
> +                    seg, PCI_BUS(bdf), PCI_SLOT(bdf), PCI_FUNC(bdf));
> +    return ERR_PTR(-EINVAL);
> +}
> +
> +int amd_iommu_msi_msg_update_ire(
>      struct msi_desc *msi_desc, struct msi_msg *msg)
>  {
>      struct pci_dev *pdev = msi_desc->dev;
>      int bdf, seg;
>      struct amd_iommu *iommu;
> 
> -    if ( !iommu_intremap )
> -        return;
> -
>      bdf = pdev ? PCI_BDF2(pdev->bus, pdev->devfn) : hpet_sbdf.bdf;
>      seg = pdev ? pdev->seg : hpet_sbdf.seg;
> 
> -    iommu = find_iommu_for_device(seg, bdf);
> -    if ( !iommu )
> -    {
> -        AMD_IOMMU_DEBUG("Fail to find iommu for MSI device id = %#x\n", bdf);
> -        return;
> -    }
> +    iommu = _find_iommu_for_device(seg, bdf);
> +    if ( IS_ERR_OR_NULL(iommu) )
> +        return PTR_ERR(iommu);
> 
>      if ( msi_desc->remap_index >= 0 )
>      {
> @@ -395,7 +406,7 @@ void amd_iommu_msi_msg_update_ire(
>      }
> 
>      if ( !msg )
> -        return;
> +        return 0;
> 
>      do {
>          update_intremap_entry_from_msi_msg(iommu, bdf, &msi_desc->remap_index,
> @@ -404,6 +415,8 @@ void amd_iommu_msi_msg_update_ire(
>              break;
>          bdf += pdev->phantom_stride;
>      } while ( PCI_SLOT(bdf) == PCI_SLOT(pdev->devfn) );
> +
> +    return 0;
>  }
> 
>  void amd_iommu_read_msi_from_ire(
> --- a/xen/drivers/passthrough/iommu.c
> +++ b/xen/drivers/passthrough/iommu.c
> @@ -548,18 +548,20 @@ void iommu_update_ire_from_apic(
>      const struct iommu_ops *ops = iommu_get_ops();
>      ops->update_ire_from_apic(apic, reg, value);
>  }
> -void iommu_update_ire_from_msi(
> +
> +int iommu_update_ire_from_msi(
>      struct msi_desc *msi_desc, struct msi_msg *msg)
>  {
>      const struct iommu_ops *ops = iommu_get_ops();
> -    ops->update_ire_from_msi(msi_desc, msg);
> +    return iommu_intremap ? ops->update_ire_from_msi(msi_desc, msg) : 0;
>  }
> 
>  void iommu_read_msi_from_ire(
>      struct msi_desc *msi_desc, struct msi_msg *msg)
>  {
>      const struct iommu_ops *ops = iommu_get_ops();
> -    ops->read_msi_from_ire(msi_desc, msg);
> +    if ( iommu_intremap )
> +        ops->read_msi_from_ire(msi_desc, msg);
>  }
> 
>  unsigned int iommu_read_apic_from_ire(unsigned int apic, unsigned int reg)
> --- a/xen/drivers/passthrough/vtd/extern.h
> +++ b/xen/drivers/passthrough/vtd/extern.h
> @@ -90,7 +90,7 @@ void io_apic_write_remap_rte(unsigned in
>  struct msi_desc;
>  struct msi_msg;
>  void msi_msg_read_remap_rte(struct msi_desc *, struct msi_msg *);
> -void msi_msg_write_remap_rte(struct msi_desc *, struct msi_msg *);
> +int msi_msg_write_remap_rte(struct msi_desc *, struct msi_msg *);
> 
>  int intel_setup_hpet_msi(struct msi_desc *);
> 
> --- a/xen/drivers/passthrough/vtd/intremap.c
> +++ b/xen/drivers/passthrough/vtd/intremap.c
> @@ -653,7 +653,7 @@ void msi_msg_read_remap_rte(
>          remap_entry_to_msi_msg(drhd->iommu, msg);
>  }
> 
> -void msi_msg_write_remap_rte(
> +int msi_msg_write_remap_rte(
>      struct msi_desc *msi_desc, struct msi_msg *msg)
>  {
>      struct pci_dev *pdev = msi_desc->dev;
> @@ -661,8 +661,8 @@ void msi_msg_write_remap_rte(
> 
>      drhd = pdev ? acpi_find_matched_drhd_unit(pdev)
>                  : hpet_to_drhd(msi_desc->hpet_id);
> -    if ( drhd )
> -        msi_msg_to_remap_entry(drhd->iommu, pdev, msi_desc, msg);
> +    return drhd ? msi_msg_to_remap_entry(drhd->iommu, pdev, msi_desc, msg)
> +                : -EINVAL;
>  }
> 
>  int __init intel_setup_hpet_msi(struct msi_desc *msi_desc)
> --- a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
> +++ b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
> @@ -93,7 +93,7 @@ void *amd_iommu_alloc_intremap_table(voi
>  int amd_iommu_free_intremap_table(u16 seg, struct ivrs_mappings *);
>  void amd_iommu_ioapic_update_ire(
>      unsigned int apic, unsigned int reg, unsigned int value);
> -void amd_iommu_msi_msg_update_ire(
> +int amd_iommu_msi_msg_update_ire(
>      struct msi_desc *msi_desc, struct msi_msg *msg);
>  void amd_iommu_read_msi_from_ire(
>      struct msi_desc *msi_desc, struct msi_msg *msg);
> --- a/xen/include/asm-x86/msi.h
> +++ b/xen/include/asm-x86/msi.h
> @@ -78,8 +78,7 @@ extern int pci_enable_msi(struct msi_inf
>  extern void pci_disable_msi(struct msi_desc *desc);
>  extern int pci_prepare_msix(u16 seg, u8 bus, u8 devfn, bool_t off);
>  extern void pci_cleanup_msi(struct pci_dev *pdev);
> -extern void setup_msi_handler(struct irq_desc *, struct msi_desc *);
> -extern void setup_msi_irq(struct irq_desc *);
> +extern int setup_msi_irq(struct irq_desc *, struct msi_desc *);
>  extern void teardown_msi_irq(int irq);
>  extern int msi_free_vector(struct msi_desc *entry);
>  extern int pci_restore_msi_state(struct pci_dev *pdev);
> --- a/xen/include/xen/iommu.h
> +++ b/xen/include/xen/iommu.h
> @@ -106,7 +106,7 @@ struct iommu_ops {
>                          u8 devfn, struct pci_dev *);
>      int (*get_device_group_id)(u16 seg, u8 bus, u8 devfn);
>      void (*update_ire_from_apic)(unsigned int apic, unsigned int reg, unsigned int value);
> -    void (*update_ire_from_msi)(struct msi_desc *msi_desc, struct msi_msg *msg);
> +    int (*update_ire_from_msi)(struct msi_desc *msi_desc, struct msi_msg *msg);
>      void (*read_msi_from_ire)(struct msi_desc *msi_desc, struct msi_msg *msg);
>      unsigned int (*read_apic_from_ire)(unsigned int apic, unsigned int reg);
>      int (*setup_hpet_msi)(struct msi_desc *);
> @@ -120,7 +120,7 @@ struct iommu_ops {
>  };
> 
>  void iommu_update_ire_from_apic(unsigned int apic, unsigned int reg, unsigned int value);
> -void iommu_update_ire_from_msi(struct msi_desc *msi_desc, struct msi_msg *msg);
> +int iommu_update_ire_from_msi(struct msi_desc *msi_desc, struct msi_msg *msg);
>  void iommu_read_msi_from_ire(struct msi_desc *msi_desc, struct msi_msg *msg);
>  unsigned int iommu_read_apic_from_ire(unsigned int apic, unsigned int reg);
>  int iommu_setup_hpet_msi(struct msi_desc *);
> 
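A note on the ERR_PTR() convention used by the new _find_iommu_for_device()
helper above: it distinguishes three outcomes. A valid pointer means an
IOMMU serves the device and the remapping entry should be updated; NULL
means the MSI is being set up for one of the IOMMUs themselves, so no
remapping is needed; and ERR_PTR(-EINVAL) means no IOMMU covers the device,
which is a real error. Since PTR_ERR(NULL) evaluates to 0, the single
"return PTR_ERR(iommu);" in the caller turns the NULL case into success and
the error-pointer case into -EINVAL. Below is a minimal, self-contained
sketch of that idiom; the ERR_PTR()/PTR_ERR()/IS_ERR_OR_NULL() definitions
are simplified stand-ins for the real ones in xen/include/xen/err.h, and
lookup_iommu() with its table is purely hypothetical, existing only to show
the control flow.

/* Stand-alone sketch; build with e.g. gcc -Wall err_ptr_sketch.c */
#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

/* Simplified stand-ins for the macros provided by xen/include/xen/err.h. */
static inline void *ERR_PTR(long error)     { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR_OR_NULL(const void *ptr)
{
    return !ptr || (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

struct fake_iommu { int seg, bdf; };

/* Hypothetical IOMMU list - stands in for amd_iommu_head. */
static struct fake_iommu iommus[] = { { 0, 0x08 }, { 0, 0x10 } };

/* Hypothetical lookup mirroring _find_iommu_for_device()'s three outcomes. */
static struct fake_iommu *lookup_iommu(int seg, int bdf)
{
    unsigned int i;

    if ( bdf >= 0x20 )               /* pretend: an IOMMU covers this device */
        return &iommus[0];

    for ( i = 0; i < sizeof(iommus) / sizeof(iommus[0]); i++ )
        if ( iommus[i].seg == seg && iommus[i].bdf == bdf )
            return NULL;             /* the MSI targets the IOMMU itself */

    return ERR_PTR(-EINVAL);         /* no IOMMU covers this device */
}

/* Caller pattern: one test, one return, as in amd_iommu_msi_msg_update_ire(). */
static int update_ire(int seg, int bdf)
{
    struct fake_iommu *iommu = lookup_iommu(seg, bdf);

    if ( IS_ERR_OR_NULL(iommu) )
        return PTR_ERR(iommu); /* NULL -> 0 (success), ERR_PTR(-EINVAL) -> -EINVAL */

    /* ... the remapping entry would be programmed here ... */
    return 0;
}

int main(void)
{
    printf("covered device: %d\n", update_ire(0, 0x30)); /* 0   */
    printf("IOMMU itself:   %d\n", update_ire(0, 0x08)); /* 0   */
    printf("uncovered dev:  %d\n", update_ire(0, 0x04)); /* -22 */
    return 0;
}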


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 

