Re: [Xen-devel] [V10 PATCH 1/4] pvh dom0: construct_dom0 changes
On 30/04/14 03:06, Mukesh Rathor wrote:
> This patch changes construct_dom0() to boot in pvh mode:
> - Make sure the dom0 ELF supports pvh mode.
> - Call guest_physmap_add_page for pvh rather than simple p2m setting.
> - Map all non-RAM regions 1:1 up to the end region in the e820 or 4GB,
>   whichever is higher.
> - Allocate the p2m, copying the calculation from the toolstack.
> - Allocate the shared info page from virtual space so that the dom0
>   page tables can be updated, then update the p2m for it with the
>   actual mfn.
> - Since we build the page tables for pvh the same way as for pv, in
>   pvh_fixup_page_tables_for_hap we replace the mfns with pfns.
>
> Signed-off-by: Mukesh Rathor <mukesh.rathor@xxxxxxxxxx>
> Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
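For anyone not following the whole series: the "map all non-RAM regions 1:1"
step above just walks the e820 and identity-maps every gap between RAM
regions. A rough standalone sketch of the idea, with simplified types
("e820ish" and "add_identity" are illustrative stand-ins, not the real Xen
structures or pvh_add_mem_mapping(); the 4GB tail case is ignored):

#include <stdio.h>

/* Illustrative stand-in for an e820 entry; pfn ranges are [start, end). */
struct e820ish { unsigned long start_pfn, end_pfn; int is_ram; };

static void add_identity(unsigned long pfn, unsigned long nump)
{
    /* In Xen this would add nump gfn == mfn mappings starting at pfn. */
    printf("1:1 map pfn %#lx, %lu pages\n", pfn, nump);
}

/* Walk a sorted map and identity-map every gap between RAM regions. */
static void map_holes_1to1(const struct e820ish *map, unsigned int nr)
{
    unsigned long cur = 0;
    unsigned int i;

    for ( i = 0; i < nr; i++ )
    {
        if ( !map[i].is_ram )
            continue;
        if ( cur < map[i].start_pfn )           /* hole before this RAM */
            add_identity(cur, map[i].start_pfn - cur);
        cur = map[i].end_pfn;
    }
}

int main(void)
{
    /* Toy map: low RAM, a legacy hole, RAM up to 4GiB. */
    struct e820ish map[] = {
        { 0x0,   0xa0,     1 },
        { 0xa0,  0x100,    0 },
        { 0x100, 0x100000, 1 },
    };
    map_holes_1to1(map, 3);
    return 0;
}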
I have the following patch on top of yours; it adds the memory removed by
the e820 holes back at the end of the memory map:
---
diff --git a/xen/arch/x86/domain_build.c b/xen/arch/x86/domain_build.c
index 38ed9f6..6f5ba22 100644
--- a/xen/arch/x86/domain_build.c
+++ b/xen/arch/x86/domain_build.c
@@ -327,11 +327,13 @@ static __init void pvh_add_mem_mapping(struct domain *d, unsigned long gfn,
  * pvh fixme: The following doesn't map MMIO ranges when they sit above the
  * highest E820 covered address.
  */
-static __init void pvh_map_all_iomem(struct domain *d)
+static __init void pvh_map_all_iomem(struct domain *d, unsigned long nr_pages)
 {
     unsigned long start_pfn, end_pfn, end = 0, start = 0;
     const struct e820entry *entry;
-    unsigned int i, nump;
+    unsigned int i, j, nump, navail, nmap, nr_holes = 0;
+    struct page_info *page;
+    int rc;
 
     for ( i = 0, entry = e820.map; i < e820.nr_map; i++, entry++ )
     {
@@ -353,6 +355,9 @@ static __init void pvh_map_all_iomem(struct domain *d)
                 nump = end_pfn - start_pfn;
                 /* Add pages to the mapping */
                 pvh_add_mem_mapping(d, start_pfn, start_pfn, nump);
+                if ( start_pfn <= nr_pages )
+                    nr_holes += (end_pfn < nr_pages) ?
+                                nump : (nr_pages - start_pfn);
             }
             start = end;
         }
@@ -369,6 +374,42 @@ static __init void pvh_map_all_iomem(struct domain *d)
         nump = end_pfn - start_pfn;
         pvh_add_mem_mapping(d, start_pfn, start_pfn, nump);
     }
+
+    /*
+     * Add the memory removed by the holes at the end of the
+     * memory map.
+     */
+    for ( i = 0, entry = e820.map; i < e820.nr_map; i++, entry++ )
+    {
+        if ( entry->type != E820_RAM )
+            continue;
+
+        end_pfn = PFN_UP(entry->addr + entry->size);
+        if ( end_pfn <= nr_pages )
+            continue;
+
+        navail = end_pfn - nr_pages;
+        nmap = navail > nr_holes ? nr_holes : navail;
+        start_pfn = PFN_DOWN(entry->addr) < nr_pages ?
+                        nr_pages : PFN_DOWN(entry->addr);
+        page = alloc_domheap_pages(d, get_order_from_pages(nmap), 0);
+        if ( !page )
+            panic("Not enough RAM for domain 0");
+        for ( j = 0; j < nmap; j++ )
+        {
+            rc = guest_physmap_add_page(d, start_pfn + j, page_to_mfn(page), 0);
+            if ( rc != 0 )
+                panic("Unable to add gpfn %#lx mfn %#lx to Dom0 physmap",
+                      start_pfn + j, page_to_mfn(page));
+            page++;
+
+        }
+        nr_holes -= nmap;
+        if ( nr_holes == 0 )
+            break;
+    }
+
+    ASSERT(nr_holes == 0);
 }
 
 static __init void dom0_update_physmap(struct domain *d, unsigned long pfn,
@@ -1391,7 +1432,7 @@ int __init construct_dom0(
         pfn = shared_info_paddr >> PAGE_SHIFT;
         dom0_update_physmap(d, pfn, mfn, 0);
 
-        pvh_map_all_iomem(d);
+        pvh_map_all_iomem(d, nr_pages);
     }
 
     if ( d->domain_id == hardware_domid )
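To make the hole accounting concrete, here is a small hedged example
(standalone C with toy pfn values and one hypothetical MMIO hole, not Xen
code) mirroring the nr_holes arithmetic above: every non-RAM page below
nr_pages displaces a page of dom0 RAM, and the second loop grants that many
extra pages back at gpfns starting from nr_pages, i.e. at the end of the
memory map:

#include <stdio.h>

/* Toy model of the nr_holes accounting; values are illustrative pfns. */
int main(void)
{
    unsigned long nr_pages = 0x100000;                  /* dom0 sized at 4GiB */
    unsigned long hole_start = 0xf0000, hole_end = 0xf8000; /* made-up hole */
    unsigned long nump = hole_end - hole_start, nr_holes = 0;

    /* First loop: the hole lies below nr_pages, so it all displaces RAM. */
    if ( hole_start <= nr_pages )
        nr_holes += (hole_end < nr_pages) ? nump : (nr_pages - hole_start);

    /* Second loop: nr_holes pages get allocated and added to the physmap
     * at gpfns starting from nr_pages. */
    printf("displaced pages: %#lx, repopulated at gpfn %#lx..%#lx\n",
           nr_holes, nr_pages, nr_pages + nr_holes - 1);
    return 0;
}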