[Xen-devel] [PATCH v11 1/9] iommu: introduce the concept of DFN...
...meaning 'device DMA frame number' i.e. a frame number mapped in the IOMMU
(rather than the MMU) and hence used for DMA address translation.
This patch is a largely cosmetic change that replaces the terms 'gfn'
and 'gaddr' with 'dfn' and 'daddr' in all the places where the frame
number or address relates to a device rather than the CPU.
The parts that are not purely cosmetic are:
- the introduction of a type-safe declaration of dfn_t and definition of
INVALID_DFN to make the substitution of gfn_x(INVALID_GFN) with
dfn_x(INVALID_DFN) mechanical.
- the introduction of __dfn_to_daddr and __daddr_to_dfn (and type-safe
variants without the leading __) with some use of the former.
Subsequent patches will convert code to make use of type-safe DFNs.
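As an illustration, here is a minimal sketch (on x86, where daddr_t is
defined) of how the type-safe helpers introduced below compose; the
function and its name are hypothetical, not part of the patch:

#include <xen/iommu.h> /* dfn_t, _dfn(), dfn_x(), INVALID_DFN; pulls in
                        * asm/iommu.h for daddr_t and dfn_to_daddr() */

/* Hypothetical usage sketch -- not part of this patch. */
static daddr_t example_dfn_to_daddr(uint64_t raw_frame)
{
    dfn_t dfn = _dfn(raw_frame);            /* wrap the raw value */

    if ( dfn_x(dfn) == dfn_x(INVALID_DFN) ) /* unwrap to compare */
        return 0;

    /* dfn_to_daddr() unwraps and shifts by IOMMU_PAGE_SHIFT (12) */
    return dfn_to_daddr(dfn);
}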
Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
Acked-by: Jan Beulich <jbeulich@xxxxxxxx>
Reviewed-by: Kevin Tian <kevin.tian@xxxxxxxxx>
---
Cc: Wei Liu <wei.liu2@xxxxxxxxxx>
Cc: Suravee Suthikulpanit <suravee.suthikulpanit@xxxxxxx>
Cc: Stefano Stabellini <sstabellini@xxxxxxxxxx>
Cc: Julien Grall <julien.grall@xxxxxxx>
v9:
- Re-word comment in mm.h.
- Move definitions relating to daddr into asm-x86/iommu.h since these are
not used by any ARM IOMMU implementation.
- Fix __daddr_to_dfn() to properly parenthesize and remove cast.
v8:
- Correct definition of INVALID_DFN.
- Don't use _AC in definition of IOMMU_PAGE_SIZE.
v7:
- Re-name BFN -> DFN as requested by Jan.
- Dropped Wei's R-b because of name change.
v6:
- Dropped changes to 'mfn' section in xen/mm.h as suggested by Kevin.
v3:
- Get rid of intermediate 'frame' variables again.
v2:
- Addressed comments from Jan.
---
xen/drivers/passthrough/amd/iommu_cmd.c | 18 +++----
xen/drivers/passthrough/amd/iommu_map.c | 78 ++++++++++++++---------------
xen/drivers/passthrough/amd/pci_amd_iommu.c | 2 +-
xen/drivers/passthrough/arm/smmu.c | 16 +++---
xen/drivers/passthrough/iommu.c | 28 +++++------
xen/drivers/passthrough/vtd/iommu.c | 30 +++++------
xen/include/asm-x86/iommu.h | 12 +++++
xen/include/xen/iommu.h | 26 +++++++---
xen/include/xen/mm.h | 5 ++
9 files changed, 123 insertions(+), 92 deletions(-)
diff --git a/xen/drivers/passthrough/amd/iommu_cmd.c b/xen/drivers/passthrough/amd/iommu_cmd.c
index 08247fa354..d4d071e53e 100644
--- a/xen/drivers/passthrough/amd/iommu_cmd.c
+++ b/xen/drivers/passthrough/amd/iommu_cmd.c
@@ -284,7 +284,7 @@ void invalidate_iommu_all(struct amd_iommu *iommu)
}
void amd_iommu_flush_iotlb(u8 devfn, const struct pci_dev *pdev,
- uint64_t gaddr, unsigned int order)
+ daddr_t daddr, unsigned int order)
{
unsigned long flags;
struct amd_iommu *iommu;
@@ -315,12 +315,12 @@ void amd_iommu_flush_iotlb(u8 devfn, const struct pci_dev *pdev,
/* send INVALIDATE_IOTLB_PAGES command */
spin_lock_irqsave(&iommu->lock, flags);
- invalidate_iotlb_pages(iommu, maxpend, 0, queueid, gaddr, req_id, order);
+ invalidate_iotlb_pages(iommu, maxpend, 0, queueid, daddr, req_id, order);
flush_command_buffer(iommu);
spin_unlock_irqrestore(&iommu->lock, flags);
}
-static void amd_iommu_flush_all_iotlbs(struct domain *d, uint64_t gaddr,
+static void amd_iommu_flush_all_iotlbs(struct domain *d, daddr_t daddr,
unsigned int order)
{
struct pci_dev *pdev;
@@ -333,7 +333,7 @@ static void amd_iommu_flush_all_iotlbs(struct domain *d, uint64_t gaddr,
u8 devfn = pdev->devfn;
do {
- amd_iommu_flush_iotlb(devfn, pdev, gaddr, order);
+ amd_iommu_flush_iotlb(devfn, pdev, daddr, order);
devfn += pdev->phantom_stride;
} while ( devfn != pdev->devfn &&
PCI_SLOT(devfn) == PCI_SLOT(pdev->devfn) );
@@ -342,7 +342,7 @@ static void amd_iommu_flush_all_iotlbs(struct domain *d, uint64_t gaddr,
/* Flush iommu cache after p2m changes. */
static void _amd_iommu_flush_pages(struct domain *d,
- uint64_t gaddr, unsigned int order)
+ daddr_t daddr, unsigned int order)
{
unsigned long flags;
struct amd_iommu *iommu;
@@ -352,13 +352,13 @@ static void _amd_iommu_flush_pages(struct domain *d,
for_each_amd_iommu ( iommu )
{
spin_lock_irqsave(&iommu->lock, flags);
- invalidate_iommu_pages(iommu, gaddr, dom_id, order);
+ invalidate_iommu_pages(iommu, daddr, dom_id, order);
flush_command_buffer(iommu);
spin_unlock_irqrestore(&iommu->lock, flags);
}
if ( ats_enabled )
- amd_iommu_flush_all_iotlbs(d, gaddr, order);
+ amd_iommu_flush_all_iotlbs(d, daddr, order);
}
void amd_iommu_flush_all_pages(struct domain *d)
@@ -367,9 +367,9 @@ void amd_iommu_flush_all_pages(struct domain *d)
}
void amd_iommu_flush_pages(struct domain *d,
- unsigned long gfn, unsigned int order)
+ unsigned long dfn, unsigned int order)
{
- _amd_iommu_flush_pages(d, (uint64_t) gfn << PAGE_SHIFT, order);
+ _amd_iommu_flush_pages(d, __dfn_to_daddr(dfn), order);
}
void amd_iommu_flush_device(struct amd_iommu *iommu, uint16_t bdf)
diff --git a/xen/drivers/passthrough/amd/iommu_map.c b/xen/drivers/passthrough/amd/iommu_map.c
index 70b4345b37..61ade71850 100644
--- a/xen/drivers/passthrough/amd/iommu_map.c
+++ b/xen/drivers/passthrough/amd/iommu_map.c
@@ -35,12 +35,12 @@ static unsigned int pfn_to_pde_idx(unsigned long pfn, unsigned int level)
return idx;
}
-void clear_iommu_pte_present(unsigned long l1_mfn, unsigned long gfn)
+void clear_iommu_pte_present(unsigned long l1_mfn, unsigned long dfn)
{
u64 *table, *pte;
table = map_domain_page(_mfn(l1_mfn));
- pte = table + pfn_to_pde_idx(gfn, IOMMU_PAGING_MODE_LEVEL_1);
+ pte = table + pfn_to_pde_idx(dfn, IOMMU_PAGING_MODE_LEVEL_1);
*pte = 0;
unmap_domain_page(table);
}
@@ -104,7 +104,7 @@ static bool_t set_iommu_pde_present(u32 *pde, unsigned long next_mfn,
return need_flush;
}
-static bool_t set_iommu_pte_present(unsigned long pt_mfn, unsigned long gfn,
+static bool_t set_iommu_pte_present(unsigned long pt_mfn, unsigned long dfn,
unsigned long next_mfn, int pde_level,
bool_t iw, bool_t ir)
{
@@ -114,7 +114,7 @@ static bool_t set_iommu_pte_present(unsigned long pt_mfn, unsigned long gfn,
table = map_domain_page(_mfn(pt_mfn));
- pde = (u32*)(table + pfn_to_pde_idx(gfn, pde_level));
+ pde = (u32*)(table + pfn_to_pde_idx(dfn, pde_level));
need_flush = set_iommu_pde_present(pde, next_mfn,
IOMMU_PAGING_MODE_LEVEL_0, iw, ir);
@@ -331,7 +331,7 @@ static void set_pde_count(u64 *pde, unsigned int count)
* otherwise increase pde count if mfn is contigous with mfn - 1
*/
static int iommu_update_pde_count(struct domain *d, unsigned long pt_mfn,
- unsigned long gfn, unsigned long mfn,
+ unsigned long dfn, unsigned long mfn,
unsigned int merge_level)
{
unsigned int pde_count, next_level;
@@ -347,7 +347,7 @@ static int iommu_update_pde_count(struct domain *d, unsigned long pt_mfn,
/* get pde at merge level */
table = map_domain_page(_mfn(pt_mfn));
- pde = table + pfn_to_pde_idx(gfn, merge_level);
+ pde = table + pfn_to_pde_idx(dfn, merge_level);
/* get page table of next level */
ntable_maddr = amd_iommu_get_next_table_from_pte((u32*)pde);
@@ -362,7 +362,7 @@ static int iommu_update_pde_count(struct domain *d, unsigned long pt_mfn,
mask = (1ULL<< (PTE_PER_TABLE_SHIFT * next_level)) - 1;
if ( ((first_mfn & mask) == 0) &&
- (((gfn & mask) | first_mfn) == mfn) )
+ (((dfn & mask) | first_mfn) == mfn) )
{
pde_count = get_pde_count(*pde);
@@ -387,7 +387,7 @@ out:
}
static int iommu_merge_pages(struct domain *d, unsigned long pt_mfn,
- unsigned long gfn, unsigned int flags,
+ unsigned long dfn, unsigned int flags,
unsigned int merge_level)
{
u64 *table, *pde, *ntable;
@@ -398,7 +398,7 @@ static int iommu_merge_pages(struct domain *d, unsigned long pt_mfn,
ASSERT( spin_is_locked(&hd->arch.mapping_lock) && pt_mfn );
table = map_domain_page(_mfn(pt_mfn));
- pde = table + pfn_to_pde_idx(gfn, merge_level);
+ pde = table + pfn_to_pde_idx(dfn, merge_level);
/* get first mfn */
ntable_mfn = amd_iommu_get_next_table_from_pte((u32*)pde) >> PAGE_SHIFT;
@@ -436,7 +436,7 @@ static int iommu_merge_pages(struct domain *d, unsigned long pt_mfn,
* {Re, un}mapping super page frames causes re-allocation of io
* page tables.
*/
-static int iommu_pde_from_gfn(struct domain *d, unsigned long pfn,
+static int iommu_pde_from_dfn(struct domain *d, unsigned long dfn,
unsigned long pt_mfn[])
{
u64 *pde, *next_table_vaddr;
@@ -465,7 +465,7 @@ static int iommu_pde_from_gfn(struct domain *d, unsigned long pfn,
pt_mfn[level] = next_table_mfn;
next_table_vaddr = map_domain_page(_mfn(next_table_mfn));
- pde = next_table_vaddr + pfn_to_pde_idx(pfn, level);
+ pde = next_table_vaddr + pfn_to_pde_idx(dfn, level);
/* Here might be a super page frame */
next_table_mfn = amd_iommu_get_next_table_from_pte((uint32_t*)pde)
@@ -477,11 +477,11 @@ static int iommu_pde_from_gfn(struct domain *d, unsigned long pfn,
next_table_mfn != 0 )
{
int i;
- unsigned long mfn, gfn;
+ unsigned long mfn, pfn;
unsigned int page_sz;
page_sz = 1 << (PTE_PER_TABLE_SHIFT * (next_level - 1));
- gfn = pfn & ~((1 << (PTE_PER_TABLE_SHIFT * next_level)) - 1);
+ pfn = dfn & ~((1 << (PTE_PER_TABLE_SHIFT * next_level)) - 1);
mfn = next_table_mfn;
/* allocate lower level page table */
@@ -499,10 +499,10 @@ static int iommu_pde_from_gfn(struct domain *d, unsigned long pfn,
for ( i = 0; i < PTE_PER_TABLE_SIZE; i++ )
{
- set_iommu_pte_present(next_table_mfn, gfn, mfn, next_level,
+ set_iommu_pte_present(next_table_mfn, pfn, mfn, next_level,
!!IOMMUF_writable, !!IOMMUF_readable);
mfn += page_sz;
- gfn += page_sz;
+ pfn += page_sz;
}
amd_iommu_flush_all_pages(d);
@@ -540,7 +540,7 @@ static int iommu_pde_from_gfn(struct domain *d, unsigned long pfn,
return 0;
}
-static int update_paging_mode(struct domain *d, unsigned long gfn)
+static int update_paging_mode(struct domain *d, unsigned long dfn)
{
u16 bdf;
void *device_entry;
@@ -554,13 +554,13 @@ static int update_paging_mode(struct domain *d, unsigned long gfn)
unsigned long old_root_mfn;
struct domain_iommu *hd = dom_iommu(d);
- if ( gfn == gfn_x(INVALID_GFN) )
+ if ( dfn == dfn_x(INVALID_DFN) )
return -EADDRNOTAVAIL;
- ASSERT(!(gfn >> DEFAULT_DOMAIN_ADDRESS_WIDTH));
+ ASSERT(!(dfn >> DEFAULT_DOMAIN_ADDRESS_WIDTH));
level = hd->arch.paging_mode;
old_root = hd->arch.root_table;
- offset = gfn >> (PTE_PER_TABLE_SHIFT * (level - 1));
+ offset = dfn >> (PTE_PER_TABLE_SHIFT * (level - 1));
ASSERT(spin_is_locked(&hd->arch.mapping_lock) && is_hvm_domain(d));
@@ -631,7 +631,7 @@ static int update_paging_mode(struct domain *d, unsigned long gfn)
return 0;
}
-int amd_iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn,
+int amd_iommu_map_page(struct domain *d, unsigned long dfn, unsigned long mfn,
unsigned int flags)
{
bool_t need_flush = 0;
@@ -651,34 +651,34 @@ int amd_iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn,
if ( rc )
{
spin_unlock(&hd->arch.mapping_lock);
- AMD_IOMMU_DEBUG("Root table alloc failed, gfn = %lx\n", gfn);
+ AMD_IOMMU_DEBUG("Root table alloc failed, dfn = %lx\n", dfn);
domain_crash(d);
return rc;
}
/* Since HVM domain is initialized with 2 level IO page table,
- * we might need a deeper page table for lager gfn now */
+ * we might need a deeper page table for wider dfn now */
if ( is_hvm_domain(d) )
{
- if ( update_paging_mode(d, gfn) )
+ if ( update_paging_mode(d, dfn) )
{
spin_unlock(&hd->arch.mapping_lock);
- AMD_IOMMU_DEBUG("Update page mode failed gfn = %lx\n", gfn);
+ AMD_IOMMU_DEBUG("Update page mode failed dfn = %lx\n", dfn);
domain_crash(d);
return -EFAULT;
}
}
- if ( iommu_pde_from_gfn(d, gfn, pt_mfn) || (pt_mfn[1] == 0) )
+ if ( iommu_pde_from_dfn(d, dfn, pt_mfn) || (pt_mfn[1] == 0) )
{
spin_unlock(&hd->arch.mapping_lock);
- AMD_IOMMU_DEBUG("Invalid IO pagetable entry gfn = %lx\n", gfn);
+ AMD_IOMMU_DEBUG("Invalid IO pagetable entry dfn = %lx\n", dfn);
domain_crash(d);
return -EFAULT;
}
/* Install 4k mapping first */
- need_flush = set_iommu_pte_present(pt_mfn[1], gfn, mfn,
+ need_flush = set_iommu_pte_present(pt_mfn[1], dfn, mfn,
IOMMU_PAGING_MODE_LEVEL_1,
!!(flags & IOMMUF_writable),
!!(flags & IOMMUF_readable));
@@ -690,7 +690,7 @@ int amd_iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn,
/* 4K mapping for PV guests never changes,
* no need to flush if we trust non-present bits */
if ( is_hvm_domain(d) )
- amd_iommu_flush_pages(d, gfn, 0);
+ amd_iommu_flush_pages(d, dfn, 0);
for ( merge_level = IOMMU_PAGING_MODE_LEVEL_2;
merge_level <= hd->arch.paging_mode; merge_level++ )
@@ -698,15 +698,15 @@ int amd_iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn,
if ( pt_mfn[merge_level] == 0 )
break;
if ( !iommu_update_pde_count(d, pt_mfn[merge_level],
- gfn, mfn, merge_level) )
+ dfn, mfn, merge_level) )
break;
- if ( iommu_merge_pages(d, pt_mfn[merge_level], gfn,
+ if ( iommu_merge_pages(d, pt_mfn[merge_level], dfn,
flags, merge_level) )
{
spin_unlock(&hd->arch.mapping_lock);
AMD_IOMMU_DEBUG("Merge iommu page failed at level %d, "
- "gfn = %lx mfn = %lx\n", merge_level, gfn, mfn);
+ "dfn = %lx mfn = %lx\n", merge_level, dfn, mfn);
domain_crash(d);
return -EFAULT;
}
@@ -720,7 +720,7 @@ out:
return 0;
}
-int amd_iommu_unmap_page(struct domain *d, unsigned long gfn)
+int amd_iommu_unmap_page(struct domain *d, unsigned long dfn)
{
unsigned long pt_mfn[7];
struct domain_iommu *hd = dom_iommu(d);
@@ -739,34 +739,34 @@ int amd_iommu_unmap_page(struct domain *d, unsigned long gfn)
}
/* Since HVM domain is initialized with 2 level IO page table,
- * we might need a deeper page table for lager gfn now */
+ * we might need a deeper page table for wider dfn now */
if ( is_hvm_domain(d) )
{
- int rc = update_paging_mode(d, gfn);
+ int rc = update_paging_mode(d, dfn);
if ( rc )
{
spin_unlock(&hd->arch.mapping_lock);
- AMD_IOMMU_DEBUG("Update page mode failed gfn = %lx\n", gfn);
+ AMD_IOMMU_DEBUG("Update page mode failed dfn = %lx\n", dfn);
if ( rc != -EADDRNOTAVAIL )
domain_crash(d);
return rc;
}
}
- if ( iommu_pde_from_gfn(d, gfn, pt_mfn) || (pt_mfn[1] == 0) )
+ if ( iommu_pde_from_dfn(d, dfn, pt_mfn) || (pt_mfn[1] == 0) )
{
spin_unlock(&hd->arch.mapping_lock);
- AMD_IOMMU_DEBUG("Invalid IO pagetable entry gfn = %lx\n", gfn);
+ AMD_IOMMU_DEBUG("Invalid IO pagetable entry dfn = %lx\n", dfn);
domain_crash(d);
return -EFAULT;
}
/* mark PTE as 'page not present' */
- clear_iommu_pte_present(pt_mfn[1], gfn);
+ clear_iommu_pte_present(pt_mfn[1], dfn);
spin_unlock(&hd->arch.mapping_lock);
- amd_iommu_flush_pages(d, gfn, 0);
+ amd_iommu_flush_pages(d, dfn, 0);
return 0;
}
diff --git a/xen/drivers/passthrough/amd/pci_amd_iommu.c b/xen/drivers/passthrough/amd/pci_amd_iommu.c
index 4a633ca940..aa9eba02bd 100644
--- a/xen/drivers/passthrough/amd/pci_amd_iommu.c
+++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c
@@ -587,7 +587,7 @@ static void amd_dump_p2m_table_level(struct page_info* pg, int level,
maddr_to_page(next_table_maddr), next_level,
address, indent + 1);
else
- printk("%*sgfn: %08lx mfn: %08lx\n",
+ printk("%*sdfn: %08lx mfn: %08lx\n",
indent, "",
(unsigned long)PFN_DOWN(address),
(unsigned long)PFN_DOWN(next_table_maddr));
diff --git a/xen/drivers/passthrough/arm/smmu.c b/xen/drivers/passthrough/arm/smmu.c
index 8f91807b1b..1eda96a72a 100644
--- a/xen/drivers/passthrough/arm/smmu.c
+++ b/xen/drivers/passthrough/arm/smmu.c
@@ -2551,7 +2551,7 @@ static int __must_check arm_smmu_iotlb_flush_all(struct domain *d)
}
static int __must_check arm_smmu_iotlb_flush(struct domain *d,
- unsigned long gfn,
+ unsigned long dfn,
unsigned int page_count)
{
/* ARM SMMU v1 doesn't have flush by VMA and VMID */
@@ -2748,7 +2748,7 @@ static void arm_smmu_iommu_domain_teardown(struct domain *d)
xfree(xen_domain);
}
-static int __must_check arm_smmu_map_page(struct domain *d, unsigned long gfn,
+static int __must_check arm_smmu_map_page(struct domain *d, unsigned long dfn,
unsigned long mfn, unsigned int flags)
{
p2m_type_t t;
@@ -2759,10 +2759,10 @@ static int __must_check arm_smmu_map_page(struct domain *d, unsigned long gfn,
* protected by an IOMMU, Xen needs to add a 1:1 mapping in the domain
* p2m to allow DMA request to work.
* This is only valid when the domain is directed mapped. Hence this
- * function should only be used by gnttab code with gfn == mfn.
+ * function should only be used by gnttab code with gfn == mfn == dfn.
*/
BUG_ON(!is_domain_direct_mapped(d));
- BUG_ON(mfn != gfn);
+ BUG_ON(mfn != dfn);
/* We only support readable and writable flags */
if (!(flags & (IOMMUF_readable | IOMMUF_writable)))
@@ -2774,19 +2774,19 @@ static int __must_check arm_smmu_map_page(struct domain *d, unsigned long gfn,
* The function guest_physmap_add_entry replaces the current mapping
* if there is already one...
*/
- return guest_physmap_add_entry(d, _gfn(gfn), _mfn(mfn), 0, t);
+ return guest_physmap_add_entry(d, _gfn(dfn), _mfn(dfn), 0, t);
}
-static int __must_check arm_smmu_unmap_page(struct domain *d, unsigned long gfn)
+static int __must_check arm_smmu_unmap_page(struct domain *d, unsigned long dfn)
{
/*
* This function should only be used by gnttab code when the domain
- * is direct mapped
+ * is direct mapped (i.e. gfn == mfn == dfn).
*/
if ( !is_domain_direct_mapped(d) )
return -EINVAL;
- return guest_physmap_remove_page(d, _gfn(gfn), _mfn(gfn), 0);
+ return guest_physmap_remove_page(d, _gfn(dfn), _mfn(dfn), 0);
}
static const struct iommu_ops arm_smmu_iommu_ops = {
diff --git a/xen/drivers/passthrough/iommu.c b/xen/drivers/passthrough/iommu.c
index ae6cf2f0ff..1ad77a7e7a 100644
--- a/xen/drivers/passthrough/iommu.c
+++ b/xen/drivers/passthrough/iommu.c
@@ -215,7 +215,7 @@ void __hwdom_init iommu_hwdom_init(struct domain *d)
page_list_for_each ( page, &d->page_list )
{
unsigned long mfn = mfn_x(page_to_mfn(page));
- unsigned long gfn = mfn_to_gmfn(d, mfn);
+ unsigned long dfn = mfn_to_gmfn(d, mfn);
unsigned int mapping = IOMMUF_readable;
int ret;
@@ -224,7 +224,7 @@ void __hwdom_init iommu_hwdom_init(struct domain *d)
== PGT_writable_page) )
mapping |= IOMMUF_writable;
- ret = hd->platform_ops->map_page(d, gfn, mfn, mapping);
+ ret = hd->platform_ops->map_page(d, dfn, mfn, mapping);
if ( !rc )
rc = ret;
@@ -285,7 +285,7 @@ void iommu_domain_destroy(struct domain *d)
arch_iommu_domain_destroy(d);
}
-int iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn,
+int iommu_map_page(struct domain *d, unsigned long dfn, unsigned long mfn,
unsigned int flags)
{
const struct domain_iommu *hd = dom_iommu(d);
@@ -294,13 +294,13 @@ int iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn,
if ( !iommu_enabled || !hd->platform_ops )
return 0;
- rc = hd->platform_ops->map_page(d, gfn, mfn, flags);
+ rc = hd->platform_ops->map_page(d, dfn, mfn, flags);
if ( unlikely(rc) )
{
if ( !d->is_shutting_down && printk_ratelimit() )
printk(XENLOG_ERR
- "d%d: IOMMU mapping gfn %#lx to mfn %#lx failed: %d\n",
- d->domain_id, gfn, mfn, rc);
+ "d%d: IOMMU mapping dfn %#lx to mfn %#lx failed: %d\n",
+ d->domain_id, dfn, mfn, rc);
if ( !is_hardware_domain(d) )
domain_crash(d);
@@ -309,7 +309,7 @@ int iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn,
return rc;
}
-int iommu_unmap_page(struct domain *d, unsigned long gfn)
+int iommu_unmap_page(struct domain *d, unsigned long dfn)
{
const struct domain_iommu *hd = dom_iommu(d);
int rc;
@@ -317,13 +317,13 @@ int iommu_unmap_page(struct domain *d, unsigned long gfn)
if ( !iommu_enabled || !hd->platform_ops )
return 0;
- rc = hd->platform_ops->unmap_page(d, gfn);
+ rc = hd->platform_ops->unmap_page(d, dfn);
if ( unlikely(rc) )
{
if ( !d->is_shutting_down && printk_ratelimit() )
printk(XENLOG_ERR
- "d%d: IOMMU unmapping gfn %#lx failed: %d\n",
- d->domain_id, gfn, rc);
+ "d%d: IOMMU unmapping dfn %#lx failed: %d\n",
+ d->domain_id, dfn, rc);
if ( !is_hardware_domain(d) )
domain_crash(d);
@@ -349,7 +349,7 @@ static void iommu_free_pagetables(unsigned long unused)
cpumask_cycle(smp_processor_id(),
&cpu_online_map));
}
-int iommu_iotlb_flush(struct domain *d, unsigned long gfn,
+int iommu_iotlb_flush(struct domain *d, unsigned long dfn,
unsigned int page_count)
{
const struct domain_iommu *hd = dom_iommu(d);
@@ -358,13 +358,13 @@ int iommu_iotlb_flush(struct domain *d, unsigned long gfn,
if ( !iommu_enabled || !hd->platform_ops || !hd->platform_ops->iotlb_flush )
return 0;
- rc = hd->platform_ops->iotlb_flush(d, gfn, page_count);
+ rc = hd->platform_ops->iotlb_flush(d, dfn, page_count);
if ( unlikely(rc) )
{
if ( !d->is_shutting_down && printk_ratelimit() )
printk(XENLOG_ERR
- "d%d: IOMMU IOTLB flush failed: %d, gfn %#lx, page count
%u\n",
- d->domain_id, rc, gfn, page_count);
+ "d%d: IOMMU IOTLB flush failed: %d, dfn %#lx, page count
%u\n",
+ d->domain_id, rc, dfn, page_count);
if ( !is_hardware_domain(d) )
domain_crash(d);
diff --git a/xen/drivers/passthrough/vtd/iommu.c b/xen/drivers/passthrough/vtd/iommu.c
index bb422ec58c..507a3f3afa 100644
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -585,7 +585,7 @@ static int __must_check iommu_flush_all(void)
}
static int __must_check iommu_flush_iotlb(struct domain *d,
- unsigned long gfn,
+ unsigned long dfn,
bool_t dma_old_pte_present,
unsigned int page_count)
{
@@ -612,12 +612,12 @@ static int __must_check iommu_flush_iotlb(struct domain *d,
if ( iommu_domid == -1 )
continue;
- if ( page_count != 1 || gfn == gfn_x(INVALID_GFN) )
+ if ( page_count != 1 || dfn == dfn_x(INVALID_DFN) )
rc = iommu_flush_iotlb_dsi(iommu, iommu_domid,
0, flush_dev_iotlb);
else
rc = iommu_flush_iotlb_psi(iommu, iommu_domid,
- (paddr_t)gfn << PAGE_SHIFT_4K,
+ __dfn_to_daddr(dfn),
PAGE_ORDER_4K,
!dma_old_pte_present,
flush_dev_iotlb);
@@ -633,15 +633,15 @@ static int __must_check iommu_flush_iotlb(struct domain *d,
}
static int __must_check iommu_flush_iotlb_pages(struct domain *d,
- unsigned long gfn,
+ unsigned long dfn,
unsigned int page_count)
{
- return iommu_flush_iotlb(d, gfn, 1, page_count);
+ return iommu_flush_iotlb(d, dfn, 1, page_count);
}
static int __must_check iommu_flush_iotlb_all(struct domain *d)
{
- return iommu_flush_iotlb(d, gfn_x(INVALID_GFN), 0, 0);
+ return iommu_flush_iotlb(d, dfn_x(INVALID_DFN), 0, 0);
}
/* clear one page's page table */
@@ -1770,7 +1770,7 @@ static void iommu_domain_teardown(struct domain *d)
}
static int __must_check intel_iommu_map_page(struct domain *d,
- unsigned long gfn,
+ unsigned long dfn,
unsigned long mfn,
unsigned int flags)
{
@@ -1789,14 +1789,14 @@ static int __must_check intel_iommu_map_page(struct domain *d,
spin_lock(&hd->arch.mapping_lock);
- pg_maddr = addr_to_dma_page_maddr(d, (paddr_t)gfn << PAGE_SHIFT_4K, 1);
+ pg_maddr = addr_to_dma_page_maddr(d, __dfn_to_daddr(dfn), 1);
if ( pg_maddr == 0 )
{
spin_unlock(&hd->arch.mapping_lock);
return -ENOMEM;
}
page = (struct dma_pte *)map_vtd_domain_page(pg_maddr);
- pte = page + (gfn & LEVEL_MASK);
+ pte = page + (dfn & LEVEL_MASK);
old = *pte;
dma_set_pte_addr(new, (paddr_t)mfn << PAGE_SHIFT_4K);
dma_set_pte_prot(new,
@@ -1820,22 +1820,22 @@ static int __must_check intel_iommu_map_page(struct domain *d,
unmap_vtd_domain_page(page);
if ( !this_cpu(iommu_dont_flush_iotlb) )
- rc = iommu_flush_iotlb(d, gfn, dma_pte_present(old), 1);
+ rc = iommu_flush_iotlb(d, dfn, dma_pte_present(old), 1);
return rc;
}
static int __must_check intel_iommu_unmap_page(struct domain *d,
- unsigned long gfn)
+ unsigned long dfn)
{
/* Do nothing if hardware domain and iommu supports pass thru. */
if ( iommu_hwdom_passthrough && is_hardware_domain(d) )
return 0;
- return dma_pte_clear_one(d, (paddr_t)gfn << PAGE_SHIFT_4K);
+ return dma_pte_clear_one(d, __dfn_to_daddr(dfn));
}
-int iommu_pte_flush(struct domain *d, u64 gfn, u64 *pte,
+int iommu_pte_flush(struct domain *d, uint64_t dfn, uint64_t *pte,
int order, int present)
{
struct acpi_drhd_unit *drhd;
@@ -1859,7 +1859,7 @@ int iommu_pte_flush(struct domain *d, u64 gfn, u64 *pte,
continue;
rc = iommu_flush_iotlb_psi(iommu, iommu_domid,
- (paddr_t)gfn << PAGE_SHIFT_4K,
+ __dfn_to_daddr(dfn),
order, !present, flush_dev_iotlb);
if ( rc > 0 )
{
@@ -2629,7 +2629,7 @@ static void vtd_dump_p2m_table_level(paddr_t pt_maddr, int level, paddr_t gpa,
vtd_dump_p2m_table_level(dma_pte_addr(*pte), next_level,
address, indent + 1);
else
- printk("%*sgfn: %08lx mfn: %08lx\n",
+ printk("%*sdfn: %08lx mfn: %08lx\n",
indent, "",
(unsigned long)(address >> PAGE_SHIFT_4K),
(unsigned long)(dma_pte_addr(*pte) >> PAGE_SHIFT_4K));
diff --git a/xen/include/asm-x86/iommu.h b/xen/include/asm-x86/iommu.h
index 14ad0489a6..0ed4a9e86d 100644
--- a/xen/include/asm-x86/iommu.h
+++ b/xen/include/asm-x86/iommu.h
@@ -30,6 +30,18 @@ struct g2m_ioport {
unsigned int np;
};
+#define IOMMU_PAGE_SHIFT 12
+#define IOMMU_PAGE_SIZE (1 << IOMMU_PAGE_SHIFT)
+#define IOMMU_PAGE_MASK (~(IOMMU_PAGE_SIZE - 1))
+
+typedef uint64_t daddr_t;
+
+#define __dfn_to_daddr(dfn) ((daddr_t)(dfn) << IOMMU_PAGE_SHIFT)
+#define __daddr_to_dfn(daddr) ((daddr) >> IOMMU_PAGE_SHIFT)
+
+#define dfn_to_daddr(dfn) __dfn_to_daddr(dfn_x(dfn))
+#define daddr_to_dfn(daddr) _dfn(__daddr_to_dfn(daddr))
+
struct arch_iommu
{
u64 pgd_maddr; /* io page directory machine address */
diff --git a/xen/include/xen/iommu.h b/xen/include/xen/iommu.h
index 57c4e81ec6..290e0aada6 100644
--- a/xen/include/xen/iommu.h
+++ b/xen/include/xen/iommu.h
@@ -23,11 +23,25 @@
#include <xen/page-defs.h>
#include <xen/spinlock.h>
#include <xen/pci.h>
+#include <xen/typesafe.h>
#include <public/hvm/ioreq.h>
#include <public/domctl.h>
#include <asm/device.h>
#include <asm/iommu.h>
+TYPE_SAFE(uint64_t, dfn);
+#define PRI_dfn PRIx64
+#define INVALID_DFN _dfn(~0ULL)
+
+#ifndef dfn_t
+#define dfn_t /* Grep fodder: dfn_t, _dfn() and dfn_x() are defined above */
+#define _dfn
+#define dfn_x
+#undef dfn_t
+#undef _dfn
+#undef dfn_x
+#endif
+
extern bool_t iommu_enable, iommu_enabled;
extern bool_t force_iommu, iommu_verbose;
extern bool_t iommu_workaround_bios_bug, iommu_igfx;
@@ -64,9 +78,9 @@ void iommu_teardown(struct domain *d);
#define IOMMUF_readable (1u<<_IOMMUF_readable)
#define _IOMMUF_writable 1
#define IOMMUF_writable (1u<<_IOMMUF_writable)
-int __must_check iommu_map_page(struct domain *d, unsigned long gfn,
+int __must_check iommu_map_page(struct domain *d, unsigned long dfn,
unsigned long mfn, unsigned int flags);
-int __must_check iommu_unmap_page(struct domain *d, unsigned long gfn);
+int __must_check iommu_unmap_page(struct domain *d, unsigned long dfn);
enum iommu_feature
{
@@ -154,9 +168,9 @@ struct iommu_ops {
#endif /* HAS_PCI */
void (*teardown)(struct domain *d);
- int __must_check (*map_page)(struct domain *d, unsigned long gfn,
+ int __must_check (*map_page)(struct domain *d, unsigned long dfn,
unsigned long mfn, unsigned int flags);
- int __must_check (*unmap_page)(struct domain *d, unsigned long gfn);
+ int __must_check (*unmap_page)(struct domain *d, unsigned long dfn);
void (*free_page_table)(struct page_info *);
#ifdef CONFIG_X86
void (*update_ire_from_apic)(unsigned int apic, unsigned int reg, unsigned int value);
@@ -167,7 +181,7 @@ struct iommu_ops {
void (*resume)(void);
void (*share_p2m)(struct domain *d);
void (*crash_shutdown)(void);
- int __must_check (*iotlb_flush)(struct domain *d, unsigned long gfn,
+ int __must_check (*iotlb_flush)(struct domain *d, unsigned long dfn,
unsigned int page_count);
int __must_check (*iotlb_flush_all)(struct domain *d);
int (*get_reserved_device_memory)(iommu_grdm_t *, void *);
@@ -189,7 +203,7 @@ int iommu_do_pci_domctl(struct xen_domctl *, struct domain
*d,
int iommu_do_domctl(struct xen_domctl *, struct domain *d,
XEN_GUEST_HANDLE_PARAM(xen_domctl_t));
-int __must_check iommu_iotlb_flush(struct domain *d, unsigned long gfn,
+int __must_check iommu_iotlb_flush(struct domain *d, unsigned long dfn,
unsigned int page_count);
int __must_check iommu_iotlb_flush_all(struct domain *d);
diff --git a/xen/include/xen/mm.h b/xen/include/xen/mm.h
index 9595539aee..054d02e6c0 100644
--- a/xen/include/xen/mm.h
+++ b/xen/include/xen/mm.h
@@ -26,6 +26,11 @@
* A linear idea of a guest physical address space. For an auto-translated
* guest, pfn == gfn while for a non-translated guest, pfn != gfn.
*
+ * dfn: Device DMA Frame Number (definitions in include/xen/iommu.h)
+ * The linear frame numbers of device DMA address space. All initiators for
+ * (i.e. all devices assigned to) a guest share a single DMA address space
+ * and, by default, Xen will ensure dfn == pfn.
+ *
* WARNING: Some of these terms have changed over time while others have been
* used inconsistently, meaning that a lot of existing code does not match the
* definitions above. New code should use these terms as described here, and
--
2.11.0