
[xen staging-4.13] AMD/IOMMU: fix off-by-one in amd_iommu_get_paging_mode() callers



commit 181614a71070ee1dfb8d1fb954ca6127c24691a3
Author:     Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Thu Apr 9 09:26:54 2020 +0200
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Thu Apr 9 09:26:54 2020 +0200

    AMD/IOMMU: fix off-by-one in amd_iommu_get_paging_mode() callers
    
    amd_iommu_get_paging_mode() expects a count, not a "maximum possible"
    value. Prior to b4f042236ae0 dropping the reference, the use of our mis-
    named "max_page" in amd_iommu_domain_init() may have lead to such a
    misunderstanding. In an attempt to avoid such confusion in the future,
    rename the function's parameter and - while at it - convert it to an
    inline function.
    
    Also replace a literal 4 by an expression tying it to a more widely
    used constant, just like amd_iommu_quarantine_init() does.
    
    Fixes: ea38867831da ("x86 / iommu: set up a scratch page in the quarantine domain")
    Fixes: b4f042236ae0 ("AMD/IOMMU: Cease using a dynamic height for the IOMMU pagetables")
    Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
    Acked-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    master commit: b75b3c62fe4afe381c6f74a07f614c0b39fe2f5d
    master date: 2020-03-16 11:24:29 +0100
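
For illustration only (not part of the commit): a minimal standalone sketch
of the level calculation, in plain C rather than Xen code. PTES_PER_TABLE and
get_paging_mode() below are stand-ins mirroring PTE_PER_TABLE_SIZE and
amd_iommu_get_paging_mode(), assuming 512-entry (9-bit) table levels; it shows
why the function needs a frame count rather than a maximum frame number.

    /* Standalone sketch: 512 PTEs per table level (9 bits), as on AMD IOMMUs. */
    #include <stdio.h>

    #define PTES_PER_TABLE (1ul << 9)

    /* Same shape as amd_iommu_get_paging_mode(): takes a COUNT of frames. */
    static int get_paging_mode(unsigned long frames)
    {
        int level = 1;

        while ( frames > PTES_PER_TABLE )
        {
            /* Round up to a whole table, then step up one level. */
            frames = (frames + PTES_PER_TABLE - 1) >> 9;
            if ( ++level > 6 )
                return -1;
        }

        return level;
    }

    int main(void)
    {
        /* 48-bit address width, 4k pages: 1ul << 36 frames => level 4. */
        printf("%d\n", get_paging_mode(1ul << 36));   /* 4 */

        /*
         * The off-by-one at an exact boundary: 513 frames need 2 levels,
         * but passing the maximum frame number (512) instead of the count
         * yields only 1 level - one too few to map frame 512.
         */
        printf("%d\n", get_paging_mode(513));         /* 2 */
        printf("%d\n", get_paging_mode(512));         /* 1 */
        return 0;
    }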
---
 xen/drivers/passthrough/amd/iommu_map.c       |  6 +++---
 xen/drivers/passthrough/amd/pci_amd_iommu.c   | 22 ++++------------------
 xen/include/asm-x86/hvm/svm/amd-iommu-proto.h | 17 ++++++++++++++++-
 3 files changed, 23 insertions(+), 22 deletions(-)

diff --git a/xen/drivers/passthrough/amd/iommu_map.c b/xen/drivers/passthrough/amd/iommu_map.c
index 4e041b960f..5be27fe9f8 100644
--- a/xen/drivers/passthrough/amd/iommu_map.c
+++ b/xen/drivers/passthrough/amd/iommu_map.c
@@ -455,9 +455,9 @@ int amd_iommu_reserve_domain_unity_map(struct domain *domain,
 int __init amd_iommu_quarantine_init(struct domain *d)
 {
     struct domain_iommu *hd = dom_iommu(d);
-    unsigned long max_gfn =
-        PFN_DOWN((1ul << DEFAULT_DOMAIN_ADDRESS_WIDTH) - 1);
-    unsigned int level = amd_iommu_get_paging_mode(max_gfn);
+    unsigned long end_gfn =
+        1ul << (DEFAULT_DOMAIN_ADDRESS_WIDTH - PAGE_SHIFT);
+    unsigned int level = amd_iommu_get_paging_mode(end_gfn);
     struct amd_iommu_pte *table;
 
     if ( hd->arch.root_table )
diff --git a/xen/drivers/passthrough/amd/pci_amd_iommu.c b/xen/drivers/passthrough/amd/pci_amd_iommu.c
index dd3401f0dc..fd12858578 100644
--- a/xen/drivers/passthrough/amd/pci_amd_iommu.c
+++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c
@@ -231,22 +231,6 @@ static int __must_check allocate_domain_resources(struct domain_iommu *hd)
     return rc;
 }
 
-int amd_iommu_get_paging_mode(unsigned long entries)
-{
-    int level = 1;
-
-    BUG_ON( !entries );
-
-    while ( entries > PTE_PER_TABLE_SIZE )
-    {
-        entries = PTE_PER_TABLE_ALIGN(entries) >> PTE_PER_TABLE_SHIFT;
-        if ( ++level > 6 )
-            return -ENOMEM;
-    }
-
-    return level;
-}
-
 static int amd_iommu_domain_init(struct domain *d)
 {
     struct domain_iommu *hd = dom_iommu(d);
@@ -259,8 +243,10 @@ static int amd_iommu_domain_init(struct domain *d)
      *   physical address space we give it, but this isn't known yet so use 4
      *   unilaterally.
      */
-    hd->arch.paging_mode = is_hvm_domain(d)
-        ? 4 : amd_iommu_get_paging_mode(get_upper_mfn_bound());
+    hd->arch.paging_mode = amd_iommu_get_paging_mode(
+        is_hvm_domain(d)
+        ? 1ul << (DEFAULT_DOMAIN_ADDRESS_WIDTH - PAGE_SHIFT)
+        : get_upper_mfn_bound() + 1);
 
     return 0;
 }
diff --git a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
index 664dfc93b9..94e288f52d 100644
--- a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
+++ b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
@@ -54,7 +54,6 @@ int amd_iommu_init_late(void);
 int amd_iommu_update_ivrs_mapping_acpi(void);
 int iov_adjust_irq_affinities(void);
 
-int amd_iommu_get_paging_mode(unsigned long entries);
 int amd_iommu_quarantine_init(struct domain *d);
 
 /* mapping functions */
@@ -177,6 +176,22 @@ static inline unsigned long region_to_pages(unsigned long addr, unsigned long si
     return (PAGE_ALIGN(addr + size) - (addr & PAGE_MASK)) >> PAGE_SHIFT;
 }
 
+static inline int amd_iommu_get_paging_mode(unsigned long max_frames)
+{
+    int level = 1;
+
+    BUG_ON(!max_frames);
+
+    while ( max_frames > PTE_PER_TABLE_SIZE )
+    {
+        max_frames = PTE_PER_TABLE_ALIGN(max_frames) >> PTE_PER_TABLE_SHIFT;
+        if ( ++level > 6 )
+            return -ENOMEM;
+    }
+
+    return level;
+}
+
 static inline struct page_info* alloc_amd_iommu_pgtable(void)
 {
     struct page_info *pg;
--
generated by git-patchbot for /home/xen/git/xen.git#staging-4.13