
Re: [Xen-devel] [PATCH v6 7/8] AMD/IOMMU: allocate one device table per PCI segment


  • To: 'Jan Beulich' <jbeulich@xxxxxxxx>, "xen-devel@xxxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxxx>
  • From: Paul Durrant <Paul.Durrant@xxxxxxxxxx>
  • Date: Mon, 23 Sep 2019 16:30:49 +0000
  • Accept-language: en-GB, en-US
  • Cc: Andrew Cooper <Andrew.Cooper3@xxxxxxxxxx>, Suravee Suthikulpanit <suravee.suthikulpanit@xxxxxxx>
  • Delivery-date: Mon, 23 Sep 2019 16:30:57 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>
  • Thread-index: AQHVbu3HiqLbXDTCTE6cw97y+FIVyqc5egiA
  • Thread-topic: [Xen-devel] [PATCH v6 7/8] AMD/IOMMU: allocate one device table per PCI segment

> -----Original Message-----
> From: Xen-devel <xen-devel-bounces@xxxxxxxxxxxxxxxxxxxx> On Behalf Of Jan 
> Beulich
> Sent: 19 September 2019 14:25
> To: xen-devel@xxxxxxxxxxxxxxxxxxxx
> Cc: Andrew Cooper <Andrew.Cooper3@xxxxxxxxxx>; Suravee Suthikulpanit 
> <suravee.suthikulpanit@xxxxxxx>
> Subject: [Xen-devel] [PATCH v6 7/8] AMD/IOMMU: allocate one device table per 
> PCI segment
> 
> Having a single device table for all segments can't possibly be right.

The copy of the spec I have says (page 253, "Fixed-Length IVHD Blocks") that
IVHD entries must have a PCI segment group of 0, so can't the code simply
require iommu->seg == 0?
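
I.e. something like the below (a minimal sketch only; the exact hook,
message and error value are assumptions on my part rather than anything
taken from the patch):

    /* Hypothetical: reject non-zero PCI segment groups when parsing IVHD. */
    if ( iommu->seg != 0 )
    {
        printk(XENLOG_ERR "AMD-Vi: unsupported PCI segment group %#x\n",
               iommu->seg);
        return -ENODEV;
    }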

  Paul

> (Even worse, the symbol wasn't static despite being used in just one
> source file.) Attach the device tables to their respective IVRS mapping
> ones.
> 
> Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
> ---
> v6: New.
> 
> ---
>  xen/drivers/passthrough/amd/iommu_init.c |   81 ++++++++++++++++---------------
>  1 file changed, 43 insertions(+), 38 deletions(-)
> 
> --- a/xen/drivers/passthrough/amd/iommu_init.c
> +++ b/xen/drivers/passthrough/amd/iommu_init.c
> @@ -39,7 +39,6 @@ unsigned int __read_mostly ivrs_bdf_entr
>  u8 __read_mostly ivhd_type;
>  static struct radix_tree_root ivrs_maps;
>  LIST_HEAD_READ_MOSTLY(amd_iommu_head);
> -struct table_struct device_table;
>  bool_t iommuv2_enabled;
> 
>  static bool iommu_has_ht_flag(struct amd_iommu *iommu, u8 mask)
> @@ -989,6 +988,12 @@ static void disable_iommu(struct amd_iom
>      spin_unlock_irqrestore(&iommu->lock, flags);
>  }
> 
> +static unsigned int __init dt_alloc_size(void)
> +{
> +    return PAGE_SIZE << get_order_from_bytes(ivrs_bdf_entries *
> +                                             IOMMU_DEV_TABLE_ENTRY_SIZE);
> +}
> +
>  static void __init deallocate_buffer(void *buf, uint32_t sz)
>  {
>      int order = 0;
> @@ -999,12 +1004,6 @@ static void __init deallocate_buffer(voi
>      }
>  }
> 
> -static void __init deallocate_device_table(struct table_struct *table)
> -{
> -    deallocate_buffer(table->buffer, table->alloc_size);
> -    table->buffer = NULL;
> -}
> -
>  static void __init deallocate_ring_buffer(struct ring_buffer *ring_buf)
>  {
>      deallocate_buffer(ring_buf->buffer, ring_buf->alloc_size);
> @@ -1068,8 +1067,29 @@ static void * __init allocate_ppr_log(st
>                                  IOMMU_PPR_LOG_DEFAULT_ENTRIES, "PPR Log");
>  }
> 
> +/*
> + * Within ivrs_mappings[] we allocate an extra array element to store
> + * - segment number,
> + * - device table.
> + */
> +#define IVRS_MAPPINGS_SEG(m) (m)[ivrs_bdf_entries].dte_requestor_id
> +#define IVRS_MAPPINGS_DEVTAB(m) (m)[ivrs_bdf_entries].intremap_table
> +
> +static void __init free_ivrs_mapping(void *ptr)
> +{
> +    const struct ivrs_mappings *ivrs_mappings = ptr;
> +
> +    if ( IVRS_MAPPINGS_DEVTAB(ivrs_mappings) )
> +        deallocate_buffer(IVRS_MAPPINGS_DEVTAB(ivrs_mappings),
> +                          dt_alloc_size());
> +
> +    xfree(ptr);
> +}
> +
>  static int __init amd_iommu_init_one(struct amd_iommu *iommu, bool intr)
>  {
> +    const struct ivrs_mappings *ivrs_mappings;
> +
>      if ( allocate_cmd_buffer(iommu) == NULL )
>          goto error_out;
> 
> @@ -1082,13 +1102,15 @@ static int __init amd_iommu_init_one(str
>      if ( intr && !set_iommu_interrupt_handler(iommu) )
>          goto error_out;
> 
> -    /* To make sure that device_table.buffer has been successfully allocated */
> -    if ( device_table.buffer == NULL )
> +    /* Make sure that the device table has been successfully allocated. */
> +    ivrs_mappings = get_ivrs_mappings(iommu->seg);
> +    if ( !IVRS_MAPPINGS_DEVTAB(ivrs_mappings) )
>          goto error_out;
> 
> -    iommu->dev_table.alloc_size = device_table.alloc_size;
> -    iommu->dev_table.entries = device_table.entries;
> -    iommu->dev_table.buffer = device_table.buffer;
> +    iommu->dev_table.alloc_size = dt_alloc_size();
> +    iommu->dev_table.entries = iommu->dev_table.alloc_size /
> +                               IOMMU_DEV_TABLE_ENTRY_SIZE;
> +    iommu->dev_table.buffer = IVRS_MAPPINGS_DEVTAB(ivrs_mappings);
> 
>      enable_iommu(iommu);
>      printk("AMD-Vi: IOMMU %d Enabled.\n", nr_amd_iommus );
> @@ -1135,11 +1157,8 @@ static void __init amd_iommu_init_cleanu
>          xfree(iommu);
>      }
> 
> -    /* free device table */
> -    deallocate_device_table(&device_table);
> -
> -    /* free ivrs_mappings[] */
> -    radix_tree_destroy(&ivrs_maps, xfree);
> +    /* Free ivrs_mappings[] and their device tables. */
> +    radix_tree_destroy(&ivrs_maps, free_ivrs_mapping);
> 
>      iommu_enabled = 0;
>      iommu_hwdom_passthrough = false;
> @@ -1147,12 +1166,6 @@ static void __init amd_iommu_init_cleanu
>      iommuv2_enabled = 0;
>  }
> 
> -/*
> - * We allocate an extra array element to store the segment number
> - * (and in the future perhaps other global information).
> - */
> -#define IVRS_MAPPINGS_SEG(m) m[ivrs_bdf_entries].dte_requestor_id
> -
>  struct ivrs_mappings *get_ivrs_mappings(u16 seg)
>  {
>      return radix_tree_lookup(&ivrs_maps, seg);
> @@ -1235,24 +1248,18 @@ static int __init alloc_ivrs_mappings(u1
>  static int __init amd_iommu_setup_device_table(
>      u16 seg, struct ivrs_mappings *ivrs_mappings)
>  {
> +    struct amd_iommu_dte *dt = IVRS_MAPPINGS_DEVTAB(ivrs_mappings);
>      unsigned int bdf;
> 
>      BUG_ON( (ivrs_bdf_entries == 0) );
> 
> -    if ( !device_table.buffer )
> +    if ( !dt )
>      {
>          /* allocate 'device table' on a 4K boundary */
> -        device_table.alloc_size = PAGE_SIZE <<
> -                                  get_order_from_bytes(
> -                                  PAGE_ALIGN(ivrs_bdf_entries *
> -                                  IOMMU_DEV_TABLE_ENTRY_SIZE));
> -        device_table.entries = device_table.alloc_size /
> -                               IOMMU_DEV_TABLE_ENTRY_SIZE;
> -
> -        device_table.buffer = allocate_buffer(device_table.alloc_size,
> -                                              "Device Table");
> +        dt = IVRS_MAPPINGS_DEVTAB(ivrs_mappings) =
> +            allocate_buffer(dt_alloc_size(), "Device Table");
>      }
> -    if ( !device_table.buffer )
> +    if ( !dt )
>          return -ENOMEM;
> 
>      /* Add device table entries */
> @@ -1260,12 +1267,10 @@ static int __init amd_iommu_setup_device
>      {
>          if ( ivrs_mappings[bdf].valid )
>          {
> -            void *dte;
>              const struct pci_dev *pdev = NULL;
> 
>              /* add device table entry */
> -            dte = device_table.buffer + (bdf * IOMMU_DEV_TABLE_ENTRY_SIZE);
> -            iommu_dte_add_device_entry(dte, &ivrs_mappings[bdf]);
> +            iommu_dte_add_device_entry(&dt[bdf], &ivrs_mappings[bdf]);
> 
>              if ( iommu_intremap &&
>                   ivrs_mappings[bdf].dte_requestor_id == bdf &&
> @@ -1308,7 +1313,7 @@ static int __init amd_iommu_setup_device
>              }
> 
>              amd_iommu_set_intremap_table(
> -                dte, ivrs_mappings[bdf].intremap_table,
> +                &dt[bdf], ivrs_mappings[bdf].intremap_table,
>                  ivrs_mappings[bdf].iommu, iommu_intremap);
>          }
>      }
> 
> 
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel

 

