[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH v7 5/7] iommu / vtd: introduce a new 'refcount' flag...
...to mean 'the page (being) mapped is reference counted'. An important pre-requisite for PV-IOMMU mapping is being able to tell the difference between IOMMU entries added at start-of-day by Xen and those that have been added by a PV-IOMMU map operation. The reason for this is that the pages for the former do not have an extra reference taken prior to mapping but the latter will (for safety/security reasons). This patch therefore introduces a new IOMMUF_refcount flag that the subsequent patch adding the PV-IOMMU map operation will use to mark entries that it adds. When the VT-d mapping code encounters this flag it will set a bit in the IOMMU PTE that is ignored by the IOMMU itself, such that a subsequent lookup operation can determine whether the mapped page was reference counted or not (and hence forbid a PV-IOMMU unmap operation in the latter case). A subsequent patch will implement a similar PTE bit for AMD IOMMUs. Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx> --- Cc: Kevin Tian <kevin.tian@xxxxxxxxx> Cc: Jan Beulich <jbeulich@xxxxxxxx> v7: - New in v7. --- xen/drivers/passthrough/vtd/iommu.c | 5 +++++ xen/drivers/passthrough/vtd/iommu.h | 17 +++++++++++------ xen/include/xen/iommu.h | 2 ++ 3 files changed, 18 insertions(+), 6 deletions(-) diff --git a/xen/drivers/passthrough/vtd/iommu.c b/xen/drivers/passthrough/vtd/iommu.c index d8873167e1..cb264d8af4 100644 --- a/xen/drivers/passthrough/vtd/iommu.c +++ b/xen/drivers/passthrough/vtd/iommu.c @@ -1810,6 +1810,10 @@ static int __must_check intel_iommu_map_page(struct domain *d, if ( iommu_snoop ) dma_set_pte_snp(new); + /* If the page has been referenced for mapping then mark it as such */ + if ( flags & IOMMUF_refcount ) + dma_set_pte_refcnt(new); + if ( old.val == new.val ) { spin_unlock(&hd->arch.mapping_lock); @@ -1879,6 +1883,7 @@ static int intel_iommu_lookup_page(struct domain *d, dfn_t dfn, mfn_t *mfn, *mfn = maddr_to_mfn(dma_pte_addr(val)); *flags = dma_pte_read(val) ? 
IOMMUF_readable : 0; *flags |= dma_pte_write(val) ? IOMMUF_writable : 0; + *flags |= dma_pte_refcnt(val) ? IOMMUF_refcount : 0; return 0; } diff --git a/xen/drivers/passthrough/vtd/iommu.h b/xen/drivers/passthrough/vtd/iommu.h index 1a992f72d6..880eebaed3 100644 --- a/xen/drivers/passthrough/vtd/iommu.h +++ b/xen/drivers/passthrough/vtd/iommu.h @@ -253,21 +253,25 @@ struct context_entry { * 1: writable * 2-6: reserved * 7: super page - * 8-11: available + * 8-9: reserved + * 10: ignored by h/w (used for refcount flag) + * 11: snoop control * 12-63: Host physcial address */ struct dma_pte { u64 val; }; -#define DMA_PTE_READ (1) -#define DMA_PTE_WRITE (2) -#define DMA_PTE_PROT (DMA_PTE_READ | DMA_PTE_WRITE) -#define DMA_PTE_SP (1 << 7) -#define DMA_PTE_SNP (1 << 11) +#define DMA_PTE_READ (1) +#define DMA_PTE_WRITE (2) +#define DMA_PTE_PROT (DMA_PTE_READ | DMA_PTE_WRITE) +#define DMA_PTE_SP (1 << 7) +#define DMA_PTE_REFCNT (1 << 10) +#define DMA_PTE_SNP (1 << 11) #define dma_clear_pte(p) do {(p).val = 0;} while(0) #define dma_set_pte_readable(p) do {(p).val |= DMA_PTE_READ;} while(0) #define dma_set_pte_writable(p) do {(p).val |= DMA_PTE_WRITE;} while(0) #define dma_set_pte_superpage(p) do {(p).val |= DMA_PTE_SP;} while(0) +#define dma_set_pte_refcnt(p) do {(p).val |= DMA_PTE_REFCNT;} while(0) #define dma_set_pte_snp(p) do {(p).val |= DMA_PTE_SNP;} while(0) #define dma_set_pte_prot(p, prot) do { \ (p).val = ((p).val & ~DMA_PTE_PROT) | ((prot) & DMA_PTE_PROT); \ @@ -280,6 +284,7 @@ struct dma_pte { (p).val |= ((addr) & PAGE_MASK_4K); } while (0) #define dma_pte_present(p) (((p).val & DMA_PTE_PROT) != 0) #define dma_pte_superpage(p) (((p).val & DMA_PTE_SP) != 0) +#define dma_pte_refcnt(p) (((p).val & DMA_PTE_REFCNT) != 0) /* interrupt remap entry */ struct iremap_entry { diff --git a/xen/include/xen/iommu.h b/xen/include/xen/iommu.h index 1bf311624c..a56d03b719 100644 --- a/xen/include/xen/iommu.h +++ b/xen/include/xen/iommu.h @@ -89,6 +89,8 @@ void 
iommu_teardown(struct domain *d); #define IOMMUF_readable (1u<<_IOMMUF_readable) #define _IOMMUF_writable 1 #define IOMMUF_writable (1u<<_IOMMUF_writable) +#define _IOMMUF_refcount 2 +#define IOMMUF_refcount (1u<<_IOMMUF_refcount) int __must_check iommu_map_page_nocrash(struct domain *d, dfn_t dfn, mfn_t mfn, unsigned int flags); int __must_check iommu_map_page(struct domain *d, dfn_t dfn, -- 2.11.0 _______________________________________________ Xen-devel mailing list Xen-devel@xxxxxxxxxxxxxxxxxxxx https://lists.xenproject.org/mailman/listinfo/xen-devel
|
Lists.xenproject.org is hosted with RackSpace, monitoring our |