
[Xen-changelog] [xen staging-4.10] x86: extend get_platform_badpages() interface



commit 4c7cd94808e545927b660474ebb849c83e9733d1
Author:     Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Wed Nov 7 09:44:28 2018 +0100
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Wed Nov 7 09:44:28 2018 +0100

    x86: extend get_platform_badpages() interface
    
    Use a structure so that, along with an address (now a frame number), an
    order can also be specified.
    
    This is part of XSA-282.
    
    Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
    Reviewed-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    master commit: 8617e69fb8307b372eeff41d55ec966dbeba36eb
    master date: 2018-11-07 09:32:08 +0100
---
 xen/arch/x86/guest/xen.c        |  6 +++---
 xen/arch/x86/mm.c               | 22 +++++++++++-----------
 xen/common/page_alloc.c         | 10 +++++-----
 xen/include/asm-x86/guest/xen.h |  4 ++--
 xen/include/asm-x86/mm.h        |  8 +++++++-
 5 files changed, 28 insertions(+), 22 deletions(-)
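
[Editorial note, not part of the commit: the following is a minimal standalone
sketch (not Xen code) of how the reworked interface is consumed. Each
platform_bad_page entry now names a starting frame plus an order, so a single
entry can cover 2^order contiguous frames, which is how init_boot_pages()
passes ranges to bootmem_region_zap() in the hunk further down. The sample
table, the PAGE_SHIFT value and the main() harness are illustrative
assumptions only.]

    /* Standalone model of the struct-based bad-page interface. */
    #include <stdio.h>

    #define PAGE_SHIFT 12

    struct platform_bad_page {
        unsigned long mfn;    /* first machine frame number of the range */
        unsigned int order;   /* range spans 2^order frames; 0 == one page */
    };

    /* Hypothetical table in the style of snb_bad_pages from the patch. */
    static const struct platform_bad_page example_bad_pages[] = {
        { .mfn = 0x20050000UL >> PAGE_SHIFT },             /* one frame  */
        { .mfn = 0x40004000UL >> PAGE_SHIFT, .order = 1 }, /* two frames */
    };

    int main(void)
    {
        unsigned int i;

        /* A consumer expands each entry into [mfn, mfn + 2^order). */
        for ( i = 0;
              i < sizeof(example_bad_pages) / sizeof(example_bad_pages[0]);
              i++ )
        {
            const struct platform_bad_page *p = &example_bad_pages[i];

            printf("zap frames %#lx .. %#lx\n",
                   p->mfn, p->mfn + (1UL << p->order) - 1);
        }

        return 0;
    }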

diff --git a/xen/arch/x86/guest/xen.c b/xen/arch/x86/guest/xen.c
index 2a5554ab26..0b0058b45e 100644
--- a/xen/arch/x86/guest/xen.c
+++ b/xen/arch/x86/guest/xen.c
@@ -40,7 +40,7 @@ bool __read_mostly xen_guest;
 static __read_mostly uint32_t xen_cpuid_base;
 extern char hypercall_page[];
 static struct rangeset *mem;
-static unsigned long __initdata reserved_pages[2];
+static struct platform_bad_page __initdata reserved_pages[2];
 
 DEFINE_PER_CPU(unsigned int, vcpu_id);
 
@@ -326,7 +326,7 @@ void __init hypervisor_fixup_e820(struct e820map *e820)
         panic("Unable to get " #p);             \
     mark_pfn_as_ram(e820, pfn);                 \
     ASSERT(i < ARRAY_SIZE(reserved_pages));     \
-    reserved_pages[i++] = pfn << PAGE_SHIFT;    \
+    reserved_pages[i++].mfn = pfn;              \
 })
     MARK_PARAM_RAM(HVM_PARAM_STORE_PFN);
     if ( !pv_console )
@@ -334,7 +334,7 @@ void __init hypervisor_fixup_e820(struct e820map *e820)
 #undef MARK_PARAM_RAM
 }
 
-const unsigned long *__init hypervisor_reserved_pages(unsigned int *size)
+const struct platform_bad_page *__init hypervisor_reserved_pages(unsigned int *size)
 {
     ASSERT(xen_guest);
 
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index f3dfe35785..4d3753d42a 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -5680,23 +5680,23 @@ void arch_dump_shared_mem_info(void)
             mem_sharing_get_nr_saved_mfns());
 }
 
-const unsigned long *__init get_platform_badpages(unsigned int *array_size)
+const struct platform_bad_page *__init get_platform_badpages(unsigned int *array_size)
 {
     u32 igd_id;
-    static unsigned long __initdata bad_pages[] = {
-        0x20050000,
-        0x20110000,
-        0x20130000,
-        0x20138000,
-        0x40004000,
+    static const struct platform_bad_page __initconst snb_bad_pages[] = {
+        { .mfn = 0x20050000 >> PAGE_SHIFT },
+        { .mfn = 0x20110000 >> PAGE_SHIFT },
+        { .mfn = 0x20130000 >> PAGE_SHIFT },
+        { .mfn = 0x20138000 >> PAGE_SHIFT },
+        { .mfn = 0x40004000 >> PAGE_SHIFT },
     };
 
-    *array_size = ARRAY_SIZE(bad_pages);
+    *array_size = ARRAY_SIZE(snb_bad_pages);
     igd_id = pci_conf_read32(0, 0, 2, 0, 0);
-    if ( !IS_SNB_GFX(igd_id) )
-        return NULL;
+    if ( IS_SNB_GFX(igd_id) )
+        return snb_bad_pages;
 
-    return bad_pages;
+    return NULL;
 }
 
 void paging_invlpg(struct vcpu *v, unsigned long va)
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 6d6f2a0628..598c3432c9 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -270,7 +270,7 @@ void __init init_boot_pages(paddr_t ps, paddr_t pe)
     unsigned long bad_spfn, bad_epfn;
     const char *p;
 #ifdef CONFIG_X86
-    const unsigned long *badpage = NULL;
+    const struct platform_bad_page *badpage;
     unsigned int i, array_size;
 
     BUILD_BUG_ON(8 * sizeof(frame_table->u.free.first_dirty) <
@@ -299,8 +299,8 @@ void __init init_boot_pages(paddr_t ps, paddr_t pe)
     {
         for ( i = 0; i < array_size; i++ )
         {
-            bootmem_region_zap(*badpage >> PAGE_SHIFT,
-                               (*badpage >> PAGE_SHIFT) + 1);
+            bootmem_region_zap(badpage->mfn,
+                               badpage->mfn + (1U << badpage->order));
             badpage++;
         }
     }
@@ -312,8 +312,8 @@ void __init init_boot_pages(paddr_t ps, paddr_t pe)
         {
             for ( i = 0; i < array_size; i++ )
             {
-                bootmem_region_zap(*badpage >> PAGE_SHIFT,
-                                   (*badpage >> PAGE_SHIFT) + 1);
+                bootmem_region_zap(badpage->mfn,
+                                   badpage->mfn + (1U << badpage->order));
                 badpage++;
             }
         }
diff --git a/xen/include/asm-x86/guest/xen.h b/xen/include/asm-x86/guest/xen.h
index c0acf4c36e..6f15e24b6b 100644
--- a/xen/include/asm-x86/guest/xen.h
+++ b/xen/include/asm-x86/guest/xen.h
@@ -37,7 +37,7 @@ void hypervisor_ap_setup(void);
 int hypervisor_alloc_unused_page(mfn_t *mfn);
 int hypervisor_free_unused_page(mfn_t mfn);
 void hypervisor_fixup_e820(struct e820map *e820);
-const unsigned long *hypervisor_reserved_pages(unsigned int *size);
+const struct platform_bad_page *hypervisor_reserved_pages(unsigned int *size);
 uint32_t hypervisor_cpuid_base(void);
 void hypervisor_resume(void);
 
@@ -65,7 +65,7 @@ static inline void hypervisor_fixup_e820(struct e820map *e820)
     ASSERT_UNREACHABLE();
 }
 
-static inline const unsigned long *hypervisor_reserved_pages(unsigned int *size)
+static inline const struct platform_bad_page *hypervisor_reserved_pages(unsigned int *size)
 {
     ASSERT_UNREACHABLE();
     return NULL;
diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
index f8ba0be247..75d30804e0 100644
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -349,7 +349,13 @@ void zap_ro_mpt(mfn_t mfn);
 
 bool is_iomem_page(mfn_t mfn);
 
-const unsigned long *get_platform_badpages(unsigned int *array_size);
+struct platform_bad_page {
+    unsigned long mfn;
+    unsigned int order;
+};
+
+const struct platform_bad_page *get_platform_badpages(unsigned int *array_size)
+
 /* Per page locks:
  * page_lock() is used for two purposes: pte serialization, and memory sharing.
  *
--
generated by git-patchbot for /home/xen/git/xen.git#staging-4.10
