
Re: [Xen-devel] [PATCH v5 09/15] vtd: add lookup_page method to iommu_ops



> From: Paul Durrant [mailto:paul.durrant@xxxxxxxxxx]
> Sent: Saturday, August 4, 2018 1:22 AM
> 
> This patch adds a new method to the VT-d IOMMU implementation to find
> the MFN currently mapped by the specified BFN, along with a wrapper
> function in generic IOMMU code to call the implementation if it exists.
> 
> This functionality will be used by a subsequent patch.
> 
> Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
> Reviewed-by: Wei Liu <wei.liu2@xxxxxxxxxx>
> ---
> Cc: Kevin Tian <kevin.tian@xxxxxxxxx>
> Cc: Jan Beulich <jbeulich@xxxxxxxx>
> Cc: George Dunlap <george.dunlap@xxxxxxxxxx>
> 
> v3:
>  - Addressed comments from George.
> 
> v2:
>  - Addressed some comments from Jan.
> ---
>  xen/drivers/passthrough/iommu.c     | 11 +++++++++++
>  xen/drivers/passthrough/vtd/iommu.c | 34 ++++++++++++++++++++++++++++++++++
>  xen/drivers/passthrough/vtd/iommu.h |  3 +++
>  xen/include/xen/iommu.h             |  4 ++++
>  4 files changed, 52 insertions(+)
> 
> diff --git a/xen/drivers/passthrough/iommu.c b/xen/drivers/passthrough/iommu.c
> index b10a37e5d7..9b7baca93f 100644
> --- a/xen/drivers/passthrough/iommu.c
> +++ b/xen/drivers/passthrough/iommu.c
> @@ -305,6 +305,17 @@ int iommu_unmap_page(struct domain *d, bfn_t bfn)
>      return rc;
>  }
> 
> +int iommu_lookup_page(struct domain *d, bfn_t bfn, mfn_t *mfn,
> +                      unsigned int *flags)
> +{
> +    const struct domain_iommu *hd = dom_iommu(d);
> +
> +    if ( !iommu_enabled || !hd->platform_ops )
> +        return -EOPNOTSUPP;
> +
> +    return hd->platform_ops->lookup_page(d, bfn, mfn, flags);
> +}
> +
>  static void iommu_free_pagetables(unsigned long unused)
>  {
>      do {
> diff --git a/xen/drivers/passthrough/vtd/iommu.c b/xen/drivers/passthrough/vtd/iommu.c
> index 282e227414..8cd3b59aa0 100644
> --- a/xen/drivers/passthrough/vtd/iommu.c
> +++ b/xen/drivers/passthrough/vtd/iommu.c
> @@ -1830,6 +1830,39 @@ static int __must_check intel_iommu_unmap_page(struct domain *d,
>      return dma_pte_clear_one(d, bfn_to_baddr(bfn));
>  }
> 
> +static int intel_iommu_lookup_page(struct domain *d, bfn_t bfn, mfn_t *mfn,
> +                                   unsigned int *flags)

Not looking at later patches yet... but conceptually the bfn address
space is per device rather than per domain. In the default situation
(w/o pvIOMMU exposed), all devices assigned to dom0 share the same
address space (bfn == pfn), which is currently linked from the domain
structure. Once pvIOMMU is exposed, dom0 starts to manage an individual
pfn address space (called the IOVA address space within dom0) per
assigned device. In that case the lookup should accept a BDF number and
then use it to find the right page table...
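
For illustration only, a per-device variant of the hook might look
something like the sketch below. It simply re-uses the body of
intel_iommu_lookup_page() from the patch; the bdf parameter and the
device_addr_to_dma_page_maddr() helper are hypothetical, invented here
to show where a per-device page-table selection would slot in:

static int intel_iommu_lookup_page_bdf(struct domain *d, uint16_t bdf,
                                       bfn_t bfn, mfn_t *mfn,
                                       unsigned int *flags)
{
    struct domain_iommu *hd = dom_iommu(d);
    struct dma_pte *page, *pte, val;
    u64 pg_maddr;

    spin_lock(&hd->arch.mapping_lock);

    /*
     * Hypothetical: walk the page table belonging to device 'bdf'
     * rather than the single per-domain table used in the patch.
     */
    pg_maddr = device_addr_to_dma_page_maddr(d, bdf, bfn_to_baddr(bfn), 0);
    if ( pg_maddr == 0 )
    {
        spin_unlock(&hd->arch.mapping_lock);
        return -ENOMEM;
    }

    page = map_vtd_domain_page(pg_maddr);
    pte = page + (bfn_x(bfn) & LEVEL_MASK);
    val = *pte;

    unmap_vtd_domain_page(page);
    spin_unlock(&hd->arch.mapping_lock);

    if ( !dma_pte_present(val) )
        return -ENOENT;

    *mfn = maddr_to_mfn(dma_pte_addr(val));
    *flags = dma_pte_read(val) ? IOMMUF_readable : 0;
    *flags |= dma_pte_write(val) ? IOMMUF_writable : 0;

    return 0;
}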

> +{
> +    struct domain_iommu *hd = dom_iommu(d);
> +    struct dma_pte *page = NULL, *pte = NULL, val;
> +    u64 pg_maddr;
> +
> +    spin_lock(&hd->arch.mapping_lock);
> +
> +    pg_maddr = addr_to_dma_page_maddr(d, bfn_to_baddr(bfn), 0);
> +    if ( pg_maddr == 0 )
> +    {
> +        spin_unlock(&hd->arch.mapping_lock);
> +        return -ENOMEM;
> +    }
> +
> +    page = map_vtd_domain_page(pg_maddr);
> +    pte = page + (bfn_x(bfn) & LEVEL_MASK);
> +    val = *pte;
> +
> +    unmap_vtd_domain_page(page);
> +    spin_unlock(&hd->arch.mapping_lock);
> +
> +    if ( !dma_pte_present(val) )
> +        return -ENOENT;
> +
> +    *mfn = maddr_to_mfn(dma_pte_addr(val));
> +    *flags = dma_pte_read(val) ? IOMMUF_readable : 0;
> +    *flags |= dma_pte_write(val) ? IOMMUF_writable : 0;
> +
> +    return 0;
> +}
> +
>  int iommu_pte_flush(struct domain *d, uint64_t bfn, uint64_t *pte,
>                      int order, int present)
>  {
> @@ -2661,6 +2694,7 @@ const struct iommu_ops intel_iommu_ops = {
>      .teardown = iommu_domain_teardown,
>      .map_page = intel_iommu_map_page,
>      .unmap_page = intel_iommu_unmap_page,
> +    .lookup_page = intel_iommu_lookup_page,
>      .free_page_table = iommu_free_page_table,
>      .reassign_device = reassign_device_ownership,
>      .get_device_group_id = intel_iommu_group_id,
> diff --git a/xen/drivers/passthrough/vtd/iommu.h b/xen/drivers/passthrough/vtd/iommu.h
> index 72c1a2e3cd..47bdfcb5ea 100644
> --- a/xen/drivers/passthrough/vtd/iommu.h
> +++ b/xen/drivers/passthrough/vtd/iommu.h
> @@ -272,6 +272,9 @@ struct dma_pte {
>  #define dma_set_pte_prot(p, prot) do { \
>          (p).val = ((p).val & ~DMA_PTE_PROT) | ((prot) & DMA_PTE_PROT); \
>      } while (0)
> +#define dma_pte_prot(p) ((p).val & DMA_PTE_PROT)
> +#define dma_pte_read(p) (dma_pte_prot(p) & DMA_PTE_READ)
> +#define dma_pte_write(p) (dma_pte_prot(p) & DMA_PTE_WRITE)
>  #define dma_pte_addr(p) ((p).val & PADDR_MASK & PAGE_MASK_4K)
>  #define dma_set_pte_addr(p, addr) do {\
>              (p).val |= ((addr) & PAGE_MASK_4K); } while (0)
> diff --git a/xen/include/xen/iommu.h b/xen/include/xen/iommu.h
> index cc0be81b4e..7c5d46df81 100644
> --- a/xen/include/xen/iommu.h
> +++ b/xen/include/xen/iommu.h
> @@ -100,6 +100,8 @@ void iommu_teardown(struct domain *d);
>  int __must_check iommu_map_page(struct domain *d, bfn_t bfn,
>                                  mfn_t mfn, unsigned int flags);
>  int __must_check iommu_unmap_page(struct domain *d, bfn_t bfn);
> +int __must_check iommu_lookup_page(struct domain *d, bfn_t bfn, mfn_t *mfn,
> +                                   unsigned int *flags);
> 
>  enum iommu_feature
>  {
> @@ -198,6 +200,8 @@ struct iommu_ops {
>      int __must_check (*map_page)(struct domain *d, bfn_t bfn, mfn_t mfn,
>                                   unsigned int flags);
>      int __must_check (*unmap_page)(struct domain *d, bfn_t bfn);
> +    int __must_check (*lookup_page)(struct domain *d, bfn_t bfn, mfn_t *mfn,
> +                                    unsigned int *flags);
>      void (*free_page_table)(struct page_info *);
>  #ifdef CONFIG_X86
>      void (*update_ire_from_apic)(unsigned int apic, unsigned int reg, unsigned int value);
> --
> 2.11.0
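
As context for "will be used by a subsequent patch": a minimal sketch
of how a caller might consume the new wrapper, based only on the
prototype this patch adds to xen/iommu.h. The -EACCES policy at the end
is illustrative and not taken from the series:

    mfn_t mfn;
    unsigned int flags;
    int rc;

    rc = iommu_lookup_page(d, bfn, &mfn, &flags);
    if ( rc )
        return rc;  /* -EOPNOTSUPP, -ENOMEM or -ENOENT per the patch */

    /* Illustrative policy check using the returned flags. */
    if ( !(flags & IOMMUF_writable) )
        return -EACCES;

    /* ... use mfn ... */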

