
[Xen-devel] [PATCH 24 of 36] x86_64: create small vmemmap mappings if PSE not available



If PSE is not available, then fall back to 4k page mappings for the
vmemmap area.
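
In other words, a condensed sketch of the resulting populate loop (the -ENOMEM
checks and the contiguity debug printk are dropped for brevity; the helper
names are the same generic ones used in the hunk below):

    for (; addr < end; addr = next) {
            /* the pgd/pud levels are populated identically on both paths */

            if (!cpu_has_pse) {
                    /* no PSE: cover one 4k page per iteration and back it
                       with an ordinary pte */
                    next = (addr + PAGE_SIZE) & PAGE_MASK;
                    pmd = vmemmap_pmd_populate(pud, addr, node);
                    p = vmemmap_pte_populate(pmd, addr, node);
            } else {
                    /* PSE available: keep covering 2M per iteration with a
                       large-page pmd entry, as before */
                    next = pmd_addr_end(addr, end);
                    pmd = pmd_offset(pud, addr);
                    if (pmd_none(*pmd)) {
                            p = vmemmap_alloc_block(PMD_SIZE, node);
                            set_pmd(pmd, __pmd(pte_val(pfn_pte(__pa(p) >> PAGE_SHIFT,
                                                               PAGE_KERNEL_LARGE))));
                    } else
                            vmemmap_verify((pte_t *)pmd, node, addr, next);
            }
    }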

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@xxxxxxxxxx>
---
 arch/x86/mm/init_64.c |   62 +++++++++++++++++++++++++++++++------------------
 1 file changed, 40 insertions(+), 22 deletions(-)

diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -997,7 +997,7 @@
        pmd_t *pmd;
 
        for (; addr < end; addr = next) {
-               next = pmd_addr_end(addr, end);
+               void *p = NULL;
 
                pgd = vmemmap_pgd_populate(addr, node);
                if (!pgd)
@@ -1007,33 +1007,51 @@
                if (!pud)
                        return -ENOMEM;
 
-               pmd = pmd_offset(pud, addr);
-               if (pmd_none(*pmd)) {
-                       pte_t entry;
-                       void *p;
+               if (!cpu_has_pse) {
+                       next = (addr + PAGE_SIZE) & PAGE_MASK;
+                       pmd = vmemmap_pmd_populate(pud, addr, node);
 
-                       p = vmemmap_alloc_block(PMD_SIZE, node);
+                       if (!pmd)
+                               return -ENOMEM;
+
+                       p = vmemmap_pte_populate(pmd, addr, node);
+
                        if (!p)
                                return -ENOMEM;
 
-                       entry = pfn_pte(__pa(p) >> PAGE_SHIFT,
-                                                       PAGE_KERNEL_LARGE);
-                       set_pmd(pmd, __pmd(pte_val(entry)));
+                       addr_end = addr + PAGE_SIZE;
+                       p_end = p + PAGE_SIZE;
+               } else {
+                       next = pmd_addr_end(addr, end);
 
-                       /* check to see if we have contiguous blocks */
-                       if (p_end != p || node_start != node) {
-                               if (p_start)
-                                       printk(KERN_DEBUG " [%lx-%lx] PMD -> [%p-%p] on node %d\n",
-                                               addr_start, addr_end-1, p_start, p_end-1, node_start);
-                               addr_start = addr;
-                               node_start = node;
-                               p_start = p;
-                       }
-                       addr_end = addr + PMD_SIZE;
-                       p_end = p + PMD_SIZE;
-               } else {
-                       vmemmap_verify((pte_t *)pmd, node, addr, next);
+                       pmd = pmd_offset(pud, addr);
+                       if (pmd_none(*pmd)) {
+                               pte_t entry;
+
+                               p = vmemmap_alloc_block(PMD_SIZE, node);
+                               if (!p)
+                                       return -ENOMEM;
+
+                               entry = pfn_pte(__pa(p) >> PAGE_SHIFT,
+                                               PAGE_KERNEL_LARGE);
+                               set_pmd(pmd, __pmd(pte_val(entry)));
+
+                               /* check to see if we have contiguous blocks */
+                               if (p_end != p || node_start != node) {
+                                       if (p_start)
+                                               printk(KERN_DEBUG " [%lx-%lx] PMD -> [%p-%p] on node %d\n",
+                                                      addr_start, addr_end-1, p_start, p_end-1, node_start);
+                                       addr_start = addr;
+                                       node_start = node;
+                                       p_start = p;
+                               }
+
+                               addr_end = addr + PMD_SIZE;
+                               p_end = p + PMD_SIZE;
+                       } else
+                               vmemmap_verify((pte_t *)pmd, node, addr, next);
                }
+
        }
        return 0;
 }





 

