[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Xen-devel] [PATCH v2] amd-iommu: use correct constants in amd_iommu_get_next_table_from_pte()...



Ping? Can I get an ack or otherwise from an AMD maintainer?

> -----Original Message-----
> From: Paul Durrant [mailto:paul.durrant@xxxxxxxxxx]
> Sent: 26 September 2018 14:44
> To: xen-devel@xxxxxxxxxxxxxxxxxxxx
> Cc: Paul Durrant <Paul.Durrant@xxxxxxxxxx>; Suravee Suthikulpanit
> <suravee.suthikulpanit@xxxxxxx>; Brian Woods <brian.woods@xxxxxxx>
> Subject: [PATCH v2] amd-iommu: use correct constants in
> amd_iommu_get_next_table_from_pte()...
> 
> ...and change the name to amd_iommu_get_address_from_pte() since the
> address read is not necessarily the address of a next level page table.
> (If the 'next level' field is not 1 - 6 then the address is a page
> address).
> 
> The constants in use prior to this patch relate to device table entries
> rather than page table entries. Although they do have the same value, it
> makes the code confusing to read.
> 
> This patch also changes the PDE/PTE pointer argument to void *, and
> removes any u32/uint32_t casts in the call sites. Unnecessary casts
> surrounding call sites are also removed.
> 
> No functional change.
> 
> NOTE: The patch also adds emacs boilerplate to iommu_map.c
> 
> Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
> ---
> Cc: Suravee Suthikulpanit <suravee.suthikulpanit@xxxxxxx>
> Cc: Brian Woods <brian.woods@xxxxxxx>
> ---
>  xen/drivers/passthrough/amd/iommu_map.c       | 40 ++++++++++++++++-----------
>  xen/drivers/passthrough/amd/pci_amd_iommu.c   | 10 +++----
>  xen/include/asm-x86/hvm/svm/amd-iommu-proto.h |  2 +-
>  3 files changed, 30 insertions(+), 22 deletions(-)
> 
> diff --git a/xen/drivers/passthrough/amd/iommu_map.c b/xen/drivers/passthrough/amd/iommu_map.c
> index 70b4345b37..9fa5cd3bd3 100644
> --- a/xen/drivers/passthrough/amd/iommu_map.c
> +++ b/xen/drivers/passthrough/amd/iommu_map.c
> @@ -285,19 +285,18 @@ void iommu_dte_set_guest_cr3(u32 *dte, u16 dom_id, u64 gcr3,
>      dte[1] = entry;
>  }
> 
> -u64 amd_iommu_get_next_table_from_pte(u32 *entry)
> +uint64_t amd_iommu_get_address_from_pte(void *pte)
>  {
> -    u64 addr_lo, addr_hi, ptr;
> +    uint32_t *entry = pte;
> +    uint64_t addr_lo, addr_hi, ptr;
> 
> -    addr_lo = get_field_from_reg_u32(
> -        entry[0],
> -        IOMMU_DEV_TABLE_PAGE_TABLE_PTR_LOW_MASK,
> -        IOMMU_DEV_TABLE_PAGE_TABLE_PTR_LOW_SHIFT);
> +    addr_lo = get_field_from_reg_u32(entry[0],
> +                                     IOMMU_PTE_ADDR_LOW_MASK,
> +                                     IOMMU_PTE_ADDR_LOW_SHIFT);
> 
> -    addr_hi = get_field_from_reg_u32(
> -        entry[1],
> -        IOMMU_DEV_TABLE_PAGE_TABLE_PTR_HIGH_MASK,
> -        IOMMU_DEV_TABLE_PAGE_TABLE_PTR_HIGH_SHIFT);
> +    addr_hi = get_field_from_reg_u32(entry[1],
> +                                     IOMMU_PTE_ADDR_HIGH_MASK,
> +                                     IOMMU_PTE_ADDR_HIGH_SHIFT);
> 
>      ptr = (addr_hi << 32) | (addr_lo << PAGE_SHIFT);
>      return ptr;
> @@ -350,11 +349,11 @@ static int iommu_update_pde_count(struct domain *d, unsigned long pt_mfn,
>      pde = table + pfn_to_pde_idx(gfn, merge_level);
> 
>      /* get page table of next level */
> -    ntable_maddr = amd_iommu_get_next_table_from_pte((u32*)pde);
> +    ntable_maddr = amd_iommu_get_address_from_pte(pde);
>      ntable = map_domain_page(_mfn(paddr_to_pfn(ntable_maddr)));
> 
>      /* get the first mfn of next level */
> -    first_mfn = amd_iommu_get_next_table_from_pte((u32*)ntable) >> PAGE_SHIFT;
> +    first_mfn = amd_iommu_get_address_from_pte(ntable) >> PAGE_SHIFT;
> 
>      if ( first_mfn == 0 )
>          goto out;
> @@ -401,7 +400,7 @@ static int iommu_merge_pages(struct domain *d, unsigned long pt_mfn,
>      pde = table + pfn_to_pde_idx(gfn, merge_level);
> 
>      /* get first mfn */
> -    ntable_mfn = amd_iommu_get_next_table_from_pte((u32*)pde) >> PAGE_SHIFT;
> +    ntable_mfn = amd_iommu_get_address_from_pte(pde) >> PAGE_SHIFT;
> 
>      if ( ntable_mfn == 0 )
>      {
> @@ -410,7 +409,7 @@ static int iommu_merge_pages(struct domain *d, unsigned long pt_mfn,
>      }
> 
>      ntable = map_domain_page(_mfn(ntable_mfn));
> -    first_mfn = amd_iommu_get_next_table_from_pte((u32*)ntable) >> PAGE_SHIFT;
> +    first_mfn = amd_iommu_get_address_from_pte(ntable) >> PAGE_SHIFT;
> 
>      if ( first_mfn == 0 )
>      {
> @@ -468,8 +467,7 @@ static int iommu_pde_from_gfn(struct domain *d, unsigned long pfn,
>          pde = next_table_vaddr + pfn_to_pde_idx(pfn, level);
> 
>          /* Here might be a super page frame */
> -        next_table_mfn = amd_iommu_get_next_table_from_pte((uint32_t*)pde)
> -                         >> PAGE_SHIFT;
> +        next_table_mfn = amd_iommu_get_address_from_pte(pde) >> PAGE_SHIFT;
> 
>          /* Split super page frame into smaller pieces.*/
>          if ( iommu_is_pte_present((u32*)pde) &&
> @@ -815,3 +813,13 @@ void amd_iommu_share_p2m(struct domain *d)
>                          mfn_x(pgd_mfn));
>      }
>  }
> +
> +/*
> + * Local variables:
> + * mode: C
> + * c-file-style: "BSD"
> + * c-basic-offset: 4
> + * tab-width: 4
> + * indent-tabs-mode: nil
> + * End:
> + */
> diff --git a/xen/drivers/passthrough/amd/pci_amd_iommu.c b/xen/drivers/passthrough/amd/pci_amd_iommu.c
> index 4a633ca940..80d9ae6561 100644
> --- a/xen/drivers/passthrough/amd/pci_amd_iommu.c
> +++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c
> @@ -430,11 +430,11 @@ static void deallocate_page_table(struct page_info *pg)
>      for ( index = 0; index < PTE_PER_TABLE_SIZE; index++ )
>      {
>          pde = table_vaddr + (index * IOMMU_PAGE_TABLE_ENTRY_SIZE);
> -        next_table_maddr = amd_iommu_get_next_table_from_pte(pde);
> -        next_level = iommu_next_level((u32*)pde);
> +        next_table_maddr = amd_iommu_get_address_from_pte(pde);
> +        next_level = iommu_next_level(pde);
> 
>          if ( (next_table_maddr != 0) && (next_level != 0) &&
> -             iommu_is_pte_present((u32*)pde) )
> +             iommu_is_pte_present(pde) )
>          {
>              /* We do not support skip levels yet */
>              ASSERT(next_level == level - 1);
> @@ -559,8 +559,8 @@ static void amd_dump_p2m_table_level(struct page_info* pg, int level,
>              process_pending_softirqs();
> 
>          pde = table_vaddr + (index * IOMMU_PAGE_TABLE_ENTRY_SIZE);
> -        next_table_maddr = amd_iommu_get_next_table_from_pte(pde);
> -        entry = (u32*)pde;
> +        next_table_maddr = amd_iommu_get_address_from_pte(pde);
> +        entry = pde;
> 
>          present = get_field_from_reg_u32(entry[0],
>                                           IOMMU_PDE_PRESENT_MASK,
> diff --git a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
> index 99bc21c7b3..a6ba0445da 100644
> --- a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
> +++ b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
> @@ -55,7 +55,7 @@ int amd_iommu_update_ivrs_mapping_acpi(void);
>  int __must_check amd_iommu_map_page(struct domain *d, unsigned long gfn,
>                                      unsigned long mfn, unsigned int flags);
>  int __must_check amd_iommu_unmap_page(struct domain *d, unsigned long gfn);
> -u64 amd_iommu_get_next_table_from_pte(u32 *entry);
> +uint64_t amd_iommu_get_address_from_pte(void *pte);
>  int __must_check amd_iommu_alloc_root(struct domain_iommu *hd);
>  int amd_iommu_reserve_domain_unity_map(struct domain *domain,
>                                         u64 phys_addr, unsigned long size,
> --
> 2.11.0


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.