[Xen-changelog] [xen master] xen: refactor physical address space compression support into common code



commit e028cedcae2edafb6ba176ec1e8c269346be05bc
Author:     Ian Campbell <ian.campbell@xxxxxxxxxx>
AuthorDate: Wed Sep 17 22:21:01 2014 +0100
Commit:     Ian Campbell <ian.campbell@xxxxxxxxxx>
CommitDate: Mon Sep 22 17:02:08 2014 +0100

    xen: refactor physical address space compression support into common code
    
    The "pdx compression" functionality will be useful on ARM as well.
    
    Move the code to common code+header and introduce HAS_PDX to control when it is
    built. L2_PAGETABLE_SHIFT is x86 specific, so introduce PDX_GROUP_SHIFT to
    abstract it out.
    
    ARM has no need for superpage compression (yet?) and lacks SUPERPAGE_SHIFT so
    those functions (spage_to_mfn et al) are not moved.
    
    No effect on x86 and no change for ARM (yet).
    
    Signed-off-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
    Acked-by: Jan Beulich <jbeulich@xxxxxxxx>
---
 xen/Rules.mk                      |    1 +
 xen/arch/x86/Rules.mk             |    1 +
 xen/arch/x86/mm.c                 |    3 -
 xen/arch/x86/setup.c              |   10 ----
 xen/arch/x86/x86_64/mm.c          |   53 --------------------
 xen/common/Makefile               |    1 +
 xen/common/pdx.c                  |   99 +++++++++++++++++++++++++++++++++++++
 xen/include/asm-x86/mm.h          |    4 +-
 xen/include/asm-x86/page.h        |    2 -
 xen/include/asm-x86/x86_64/page.h |   25 +---------
 xen/include/xen/pdx.h             |   47 +++++++++++++++++
 11 files changed, 152 insertions(+), 94 deletions(-)
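
For readers new to "pdx compression": the idea is that PFN bits which are
guaranteed to be zero on a given platform (a "hole" in the physical address
space) are squeezed out, so the frame table only needs entries for frames that
can actually exist. The sketch below is not part of the patch; it mirrors the
arithmetic of the pfn_to_pdx()/pdx_to_pfn() helpers being moved into
xen/include/xen/pdx.h, using made-up parameters (a 4-bit hole above bit 20)
and hypothetical example_* names so it can be compiled and run on its own.

/*
 * Standalone illustration of the pfn <-> pdx compression arithmetic.
 * The mask values below are invented for the example; on a real system
 * pfn_pdx_hole_setup() derives them from the host memory map.
 */
#include <stdio.h>

#define BOTTOM_BITS 20                                        /* PFN bits kept below the hole */
#define HOLE_BITS   4                                         /* width of the all-zero hole */
#define BOTTOM_MASK ((1UL << BOTTOM_BITS) - 1)                /* plays pfn_pdx_bottom_mask */
#define HOLE_MASK   (((1UL << HOLE_BITS) - 1) << BOTTOM_BITS) /* plays pfn_hole_mask */
#define TOP_MASK    (~(BOTTOM_MASK | HOLE_MASK))              /* plays pfn_top_mask */
#define HOLE_SHIFT  HOLE_BITS                                 /* plays pfn_pdx_hole_shift */

/* Same arithmetic as pfn_to_pdx() in xen/include/xen/pdx.h. */
static unsigned long example_pfn_to_pdx(unsigned long pfn)
{
    /* Keep the low bits, drop the hole, slide the high bits down over it. */
    return (pfn & BOTTOM_MASK) | ((pfn & TOP_MASK) >> HOLE_SHIFT);
}

/* Same arithmetic as pdx_to_pfn() in xen/include/xen/pdx.h. */
static unsigned long example_pdx_to_pfn(unsigned long pdx)
{
    /* Inverse: slide the high bits back up past the hole. */
    return (pdx & BOTTOM_MASK) | ((pdx << HOLE_SHIFT) & TOP_MASK);
}

int main(void)
{
    /* The last PFN below the hole and the first two above it. */
    unsigned long pfns[] = { 0x000fffffUL, 0x01000000UL, 0x01000001UL };
    unsigned int i;

    for ( i = 0; i < sizeof(pfns) / sizeof(pfns[0]); i++ )
    {
        unsigned long pdx = example_pfn_to_pdx(pfns[i]);

        printf("pfn %#010lx -> pdx %#010lx -> pfn %#010lx\n",
               pfns[i], pdx, example_pdx_to_pfn(pdx));
    }

    return 0;
}

With these example parameters the first PFN above the hole (0x01000000)
compresses to pdx 0x00100000, immediately after the range below the hole,
which is what keeps frame_table dense.
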

diff --git a/xen/Rules.mk b/xen/Rules.mk
index b49f3c8..e2f9e36 100644
--- a/xen/Rules.mk
+++ b/xen/Rules.mk
@@ -59,6 +59,7 @@ CFLAGS-$(HAS_PASSTHROUGH) += -DHAS_PASSTHROUGH
 CFLAGS-$(HAS_DEVICE_TREE) += -DHAS_DEVICE_TREE
 CFLAGS-$(HAS_PCI)       += -DHAS_PCI
 CFLAGS-$(HAS_IOPORTS)   += -DHAS_IOPORTS
+CFLAGS-$(HAS_PDX)       += -DHAS_PDX
 CFLAGS-$(frame_pointer) += -fno-omit-frame-pointer -DCONFIG_FRAME_POINTER
 
 ifneq ($(max_phys_cpus),)
diff --git a/xen/arch/x86/Rules.mk b/xen/arch/x86/Rules.mk
index 576985e..6775cb5 100644
--- a/xen/arch/x86/Rules.mk
+++ b/xen/arch/x86/Rules.mk
@@ -12,6 +12,7 @@ HAS_NS16550 := y
 HAS_EHCI := y
 HAS_KEXEC := y
 HAS_GDBSX := y
+HAS_PDX := y
 xenoprof := y
 
 #
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index d23cb3f..5b3f06f 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -147,9 +147,6 @@ struct domain *dom_xen, *dom_io, *dom_cow;
 unsigned long max_page;
 unsigned long total_pages;
 
-unsigned long __read_mostly pdx_group_valid[BITS_TO_LONGS(
-    (FRAMETABLE_NR + PDX_GROUP_COUNT - 1) / PDX_GROUP_COUNT)] = { [0] = 1 };
-
 bool_t __read_mostly machine_to_phys_mapping_valid = 0;
 
 struct rangeset *__read_mostly mmio_ro_ranges;
diff --git a/xen/arch/x86/setup.c b/xen/arch/x86/setup.c
index 6a814cd..8c8b91f 100644
--- a/xen/arch/x86/setup.c
+++ b/xen/arch/x86/setup.c
@@ -392,16 +392,6 @@ static void __init setup_max_pdx(unsigned long top_page)
     max_page = pdx_to_pfn(max_pdx - 1) + 1;
 }
 
-void set_pdx_range(unsigned long smfn, unsigned long emfn)
-{
-    unsigned long idx, eidx;
-
-    idx = pfn_to_pdx(smfn) / PDX_GROUP_COUNT;
-    eidx = (pfn_to_pdx(emfn - 1) + PDX_GROUP_COUNT) / PDX_GROUP_COUNT;
-    for ( ; idx < eidx; ++idx )
-        __set_bit(idx, pdx_group_valid);
-}
-
 /* A temporary copy of the e820 map that we can mess with during bootstrap. */
 static struct e820map __initdata boot_e820;
 
diff --git a/xen/arch/x86/x86_64/mm.c b/xen/arch/x86/x86_64/mm.c
index 4937f9a..09817fc 100644
--- a/xen/arch/x86/x86_64/mm.c
+++ b/xen/arch/x86/x86_64/mm.c
@@ -40,15 +40,6 @@
 #include <asm/mem_access.h>
 #include <public/memory.h>
 
-/* Parameters for PFN/MADDR compression. */
-unsigned long __read_mostly max_pdx;
-unsigned long __read_mostly pfn_pdx_bottom_mask = ~0UL;
-unsigned long __read_mostly ma_va_bottom_mask = ~0UL;
-unsigned long __read_mostly pfn_top_mask = 0;
-unsigned long __read_mostly ma_top_mask = 0;
-unsigned long __read_mostly pfn_hole_mask = 0;
-unsigned int __read_mostly pfn_pdx_hole_shift = 0;
-
 unsigned int __read_mostly m2p_compat_vstart = __HYPERVISOR_COMPAT_VIRT_START;
 
 /* Enough page directories to map into the bottom 1GB. */
@@ -59,14 +50,6 @@ l2_pgentry_t __attribute__ ((__section__ (".bss.page_aligned")))
 
 l2_pgentry_t *compat_idle_pg_table_l2;
 
-int __mfn_valid(unsigned long mfn)
-{
-    return likely(mfn < max_page) &&
-           likely(!(mfn & pfn_hole_mask)) &&
-           likely(test_bit(pfn_to_pdx(mfn) / PDX_GROUP_COUNT,
-                           pdx_group_valid));
-}
-
 void *do_page_walk(struct vcpu *v, unsigned long addr)
 {
     unsigned long mfn = pagetable_get_pfn(v->arch.guest_table);
@@ -119,42 +102,6 @@ void *do_page_walk(struct vcpu *v, unsigned long addr)
     return map_domain_page(mfn) + (addr & ~PAGE_MASK);
 }
 
-void __init pfn_pdx_hole_setup(unsigned long mask)
-{
-    unsigned int i, j, bottom_shift = 0, hole_shift = 0;
-
-    /*
-     * We skip the first MAX_ORDER bits, as we never want to compress them.
-     * This guarantees that page-pointer arithmetic remains valid within
-     * contiguous aligned ranges of 2^MAX_ORDER pages. Among others, our
-     * buddy allocator relies on this assumption.
-     */
-    for ( j = MAX_ORDER-1; ; )
-    {
-        i = find_next_zero_bit(&mask, BITS_PER_LONG, j);
-        j = find_next_bit(&mask, BITS_PER_LONG, i);
-        if ( j >= BITS_PER_LONG )
-            break;
-        if ( j - i > hole_shift )
-        {
-            hole_shift = j - i;
-            bottom_shift = i;
-        }
-    }
-    if ( !hole_shift )
-        return;
-
-    printk(KERN_INFO "PFN compression on bits %u...%u\n",
-           bottom_shift, bottom_shift + hole_shift - 1);
-
-    pfn_pdx_hole_shift  = hole_shift;
-    pfn_pdx_bottom_mask = (1UL << bottom_shift) - 1;
-    ma_va_bottom_mask   = (PAGE_SIZE << bottom_shift) - 1;
-    pfn_hole_mask       = ((1UL << hole_shift) - 1) << bottom_shift;
-    pfn_top_mask        = ~(pfn_pdx_bottom_mask | pfn_hole_mask);
-    ma_top_mask         = pfn_top_mask << PAGE_SHIFT;
-}
-
 /*
  * Allocate page table pages for m2p table
  */
diff --git a/xen/common/Makefile b/xen/common/Makefile
index 3683ae3..f7d10f0 100644
--- a/xen/common/Makefile
+++ b/xen/common/Makefile
@@ -51,6 +51,7 @@ obj-y += tmem_xen.o
 obj-y += radix-tree.o
 obj-y += rbtree.o
 obj-y += lzo.o
+obj-$(HAS_PDX) += pdx.o
 
 obj-bin-$(CONFIG_X86) += $(foreach n,decompress bunzip2 unxz unlzma unlzo unlz4 earlycpio,$(n).init.o)
 
diff --git a/xen/common/pdx.c b/xen/common/pdx.c
new file mode 100644
index 0000000..11349a7
--- /dev/null
+++ b/xen/common/pdx.c
@@ -0,0 +1,99 @@
+/******************************************************************************
+ * Original code extracted from arch/x86/x86_64/mm.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <xen/config.h>
+#include <xen/init.h>
+#include <xen/mm.h>
+#include <xen/bitops.h>
+
+/* Parameters for PFN/MADDR compression. */
+unsigned long __read_mostly max_pdx;
+unsigned long __read_mostly pfn_pdx_bottom_mask = ~0UL;
+unsigned long __read_mostly ma_va_bottom_mask = ~0UL;
+unsigned long __read_mostly pfn_top_mask = 0;
+unsigned long __read_mostly ma_top_mask = 0;
+unsigned long __read_mostly pfn_hole_mask = 0;
+unsigned int __read_mostly pfn_pdx_hole_shift = 0;
+
+unsigned long __read_mostly pdx_group_valid[BITS_TO_LONGS(
+    (FRAMETABLE_NR + PDX_GROUP_COUNT - 1) / PDX_GROUP_COUNT)] = { [0] = 1 };
+
+int __mfn_valid(unsigned long mfn)
+{
+    return likely(mfn < max_page) &&
+           likely(!(mfn & pfn_hole_mask)) &&
+           likely(test_bit(pfn_to_pdx(mfn) / PDX_GROUP_COUNT,
+                           pdx_group_valid));
+}
+
+void set_pdx_range(unsigned long smfn, unsigned long emfn)
+{
+    unsigned long idx, eidx;
+
+    idx = pfn_to_pdx(smfn) / PDX_GROUP_COUNT;
+    eidx = (pfn_to_pdx(emfn - 1) + PDX_GROUP_COUNT) / PDX_GROUP_COUNT;
+
+    for ( ; idx < eidx; ++idx )
+        __set_bit(idx, pdx_group_valid);
+}
+
+void __init pfn_pdx_hole_setup(unsigned long mask)
+{
+    unsigned int i, j, bottom_shift = 0, hole_shift = 0;
+
+    /*
+     * We skip the first MAX_ORDER bits, as we never want to compress them.
+     * This guarantees that page-pointer arithmetic remains valid within
+     * contiguous aligned ranges of 2^MAX_ORDER pages. Among others, our
+     * buddy allocator relies on this assumption.
+     */
+    for ( j = MAX_ORDER-1; ; )
+    {
+        i = find_next_zero_bit(&mask, BITS_PER_LONG, j);
+        j = find_next_bit(&mask, BITS_PER_LONG, i);
+        if ( j >= BITS_PER_LONG )
+            break;
+        if ( j - i > hole_shift )
+        {
+            hole_shift = j - i;
+            bottom_shift = i;
+        }
+    }
+    if ( !hole_shift )
+        return;
+
+    printk(KERN_INFO "PFN compression on bits %u...%u\n",
+           bottom_shift, bottom_shift + hole_shift - 1);
+
+    pfn_pdx_hole_shift  = hole_shift;
+    pfn_pdx_bottom_mask = (1UL << bottom_shift) - 1;
+    ma_va_bottom_mask   = (PAGE_SIZE << bottom_shift) - 1;
+    pfn_hole_mask       = ((1UL << hole_shift) - 1) << bottom_shift;
+    pfn_top_mask        = ~(pfn_pdx_bottom_mask | pfn_hole_mask);
+    ma_top_mask         = pfn_top_mask << PAGE_SHIFT;
+}
+
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
index 7b85865..746bcf1 100644
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -280,9 +280,7 @@ extern unsigned long max_page;
 extern unsigned long total_pages;
 void init_frametable(void);
 
-#define PDX_GROUP_COUNT ((1 << L2_PAGETABLE_SHIFT) / \
-                         (sizeof(*frame_table) & -sizeof(*frame_table)))
-extern unsigned long pdx_group_valid[];
+#define PDX_GROUP_SHIFT L2_PAGETABLE_SHIFT
 
 /* Convert between Xen-heap virtual addresses and page-info structures. */
 static inline struct page_info *__virt_to_page(const void *v)
diff --git a/xen/include/asm-x86/page.h b/xen/include/asm-x86/page.h
index ccc268d..9aa780e 100644
--- a/xen/include/asm-x86/page.h
+++ b/xen/include/asm-x86/page.h
@@ -334,8 +334,6 @@ void *alloc_xen_pagetable(void);
 void free_xen_pagetable(void *v);
 l1_pgentry_t *virt_to_xen_l1e(unsigned long v);
 
-extern void set_pdx_range(unsigned long smfn, unsigned long emfn);
-
 /* Convert between PAT/PCD/PWT embedded in PTE flags and 3-bit cacheattr. */
 static inline uint32_t pte_flags_to_cacheattr(uint32_t flags)
 {
diff --git a/xen/include/asm-x86/x86_64/page.h b/xen/include/asm-x86/x86_64/page.h
index 3eee5b5..1d54587 100644
--- a/xen/include/asm-x86/x86_64/page.h
+++ b/xen/include/asm-x86/x86_64/page.h
@@ -35,17 +35,10 @@
 #include <xen/config.h>
 #include <asm/types.h>
 
-extern unsigned long xen_virt_end;
+#include <xen/pdx.h>
 
-extern unsigned long max_pdx;
-extern unsigned long pfn_pdx_bottom_mask, ma_va_bottom_mask;
-extern unsigned int pfn_pdx_hole_shift;
-extern unsigned long pfn_hole_mask;
-extern unsigned long pfn_top_mask, ma_top_mask;
-extern void pfn_pdx_hole_setup(unsigned long);
+extern unsigned long xen_virt_end;
 
-#define page_to_pdx(pg)  ((pg) - frame_table)
-#define pdx_to_page(pdx) (frame_table + (pdx))
 #define spage_to_pdx(spg) (((spg) - spage_table)<<(SUPERPAGE_SHIFT-PAGE_SHIFT))
 #define pdx_to_spage(pdx) (spage_table + ((pdx)>>(SUPERPAGE_SHIFT-PAGE_SHIFT)))
 /*
@@ -57,20 +50,6 @@ extern void pfn_pdx_hole_setup(unsigned long);
 #define pdx_to_virt(pdx) ((void *)(DIRECTMAP_VIRT_START + \
                                    ((unsigned long)(pdx) << PAGE_SHIFT)))
 
-extern int __mfn_valid(unsigned long mfn);
-
-static inline unsigned long pfn_to_pdx(unsigned long pfn)
-{
-    return (pfn & pfn_pdx_bottom_mask) |
-           ((pfn & pfn_top_mask) >> pfn_pdx_hole_shift);
-}
-
-static inline unsigned long pdx_to_pfn(unsigned long pdx)
-{
-    return (pdx & pfn_pdx_bottom_mask) |
-           ((pdx << pfn_pdx_hole_shift) & pfn_top_mask);
-}
-
 static inline unsigned long pfn_to_sdx(unsigned long pfn)
 {
     return pfn_to_pdx(pfn) >> (SUPERPAGE_SHIFT-PAGE_SHIFT);
diff --git a/xen/include/xen/pdx.h b/xen/include/xen/pdx.h
new file mode 100644
index 0000000..624f04f
--- /dev/null
+++ b/xen/include/xen/pdx.h
@@ -0,0 +1,47 @@
+#ifndef __XEN_PDX_H__
+#define __XEN_PDX_H__
+
+#ifdef HAS_PDX
+
+extern unsigned long max_pdx;
+extern unsigned long pfn_pdx_bottom_mask, ma_va_bottom_mask;
+extern unsigned int pfn_pdx_hole_shift;
+extern unsigned long pfn_hole_mask;
+extern unsigned long pfn_top_mask, ma_top_mask;
+
+#define PDX_GROUP_COUNT ((1 << PDX_GROUP_SHIFT) / \
+                         (sizeof(*frame_table) & -sizeof(*frame_table)))
+extern unsigned long pdx_group_valid[];
+
+extern void set_pdx_range(unsigned long smfn, unsigned long emfn);
+
+#define page_to_pdx(pg)  ((pg) - frame_table)
+#define pdx_to_page(pdx) (frame_table + (pdx))
+
+extern int __mfn_valid(unsigned long mfn);
+
+static inline unsigned long pfn_to_pdx(unsigned long pfn)
+{
+    return (pfn & pfn_pdx_bottom_mask) |
+           ((pfn & pfn_top_mask) >> pfn_pdx_hole_shift);
+}
+
+static inline unsigned long pdx_to_pfn(unsigned long pdx)
+{
+    return (pdx & pfn_pdx_bottom_mask) |
+           ((pdx << pfn_pdx_hole_shift) & pfn_top_mask);
+}
+
+extern void pfn_pdx_hole_setup(unsigned long);
+
+#endif /* HAS_PDX */
+#endif /* __XEN_PDX_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
--
generated by git-patchbot for /home/xen/git/xen.git#master

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog