[Xen-devel] [PATCH v8 5/7] xen/pvh: Move Xen code for getting mem map via hcall out of common file
We need to refactor PVH entry code so that support for other hypervisors
like Qemu/KVM can be added more easily.

The original design for PVH entry in Xen guests relies on being able to
obtain the memory map from the hypervisor using a hypercall. When we extend
the PVH entry ABI to support other hypervisors like Qemu/KVM, a new
mechanism will be added that allows the guest to get the memory map without
needing to use hypercalls. For Xen guests, the hypercall approach will
still be supported.

In preparation for adding support for other hypervisors, we can move the
code that uses hypercalls into the Xen specific file. This will allow us to
compile kernels in the future without CONFIG_XEN that are still capable of
being booted as a Qemu/KVM guest via the PVH entry point.

Signed-off-by: Maran Wilson <maran.wilson@xxxxxxxxxx>
Reviewed-by: Juergen Gross <jgross@xxxxxxxx>
---
 arch/x86/platform/pvh/enlighten.c | 29 ++++++++++++++---------------
 arch/x86/xen/enlighten_pvh.c      | 20 ++++++++++++++++++++
 2 files changed, 34 insertions(+), 15 deletions(-)

diff --git a/arch/x86/platform/pvh/enlighten.c b/arch/x86/platform/pvh/enlighten.c
index 637bd74ba32d..8040b3fbf545 100644
--- a/arch/x86/platform/pvh/enlighten.c
+++ b/arch/x86/platform/pvh/enlighten.c
@@ -8,11 +8,7 @@
 #include <asm/e820/api.h>
 #include <asm/x86_init.h>
 
-#include <asm/xen/interface.h>
-#include <asm/xen/hypercall.h>
-
 #include <xen/xen.h>
-#include <xen/interface/memory.h>
 #include <xen/interface/hvm/start_info.h>
 
 /*
@@ -31,21 +27,24 @@ static u64 pvh_get_root_pointer(void)
 	return pvh_start_info.rsdp_paddr;
 }
 
+/*
+ * Xen guests are able to obtain the memory map from the hypervisor via the
+ * HYPERVISOR_memory_op hypercall.
+ * If we are trying to boot a Xen PVH guest, it is expected that the kernel
+ * will have been configured to provide an override for this routine to do
+ * just that.
+ */
+void __init __weak mem_map_via_hcall(struct boot_params *ptr __maybe_unused)
+{
+	xen_raw_printk("Error: Could not find memory map\n");
+	BUG();
+}
+
 static void __init init_pvh_bootparams(void)
 {
-	struct xen_memory_map memmap;
-	int rc;
-
 	memset(&pvh_bootparams, 0, sizeof(pvh_bootparams));
 
-	memmap.nr_entries = ARRAY_SIZE(pvh_bootparams.e820_table);
-	set_xen_guest_handle(memmap.buffer, pvh_bootparams.e820_table);
-	rc = HYPERVISOR_memory_op(XENMEM_memory_map, &memmap);
-	if (rc) {
-		xen_raw_printk("XENMEM_memory_map failed (%d)\n", rc);
-		BUG();
-	}
-	pvh_bootparams.e820_entries = memmap.nr_entries;
+	mem_map_via_hcall(&pvh_bootparams);
 
 	if (pvh_bootparams.e820_entries < E820_MAX_ENTRIES_ZEROPAGE - 1) {
 		pvh_bootparams.e820_table[pvh_bootparams.e820_entries].addr =
diff --git a/arch/x86/xen/enlighten_pvh.c b/arch/x86/xen/enlighten_pvh.c
index 41a7d6ad74e0..35b7599d2d0b 100644
--- a/arch/x86/xen/enlighten_pvh.c
+++ b/arch/x86/xen/enlighten_pvh.c
@@ -1,13 +1,18 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <linux/acpi.h>
 
+#include <xen/hvc-console.h>
+
 #include <asm/io_apic.h>
 #include <asm/hypervisor.h>
+#include <asm/e820/api.h>
 
 #include <xen/xen.h>
 #include <asm/xen/interface.h>
 #include <asm/xen/hypercall.h>
 
+#include <xen/interface/memory.h>
+
 /*
  * PVH variables.
  *
@@ -28,3 +33,18 @@ void __init xen_pvh_init(void)
 	pfn = __pa(hypercall_page);
 	wrmsr_safe(msr, (u32)pfn, (u32)(pfn >> 32));
 }
+
+void __init mem_map_via_hcall(struct boot_params *boot_params_p)
+{
+	struct xen_memory_map memmap;
+	int rc;
+
+	memmap.nr_entries = ARRAY_SIZE(boot_params_p->e820_table);
+	set_xen_guest_handle(memmap.buffer, boot_params_p->e820_table);
+	rc = HYPERVISOR_memory_op(XENMEM_memory_map, &memmap);
+	if (rc) {
+		xen_raw_printk("XENMEM_memory_map failed (%d)\n", rc);
+		BUG();
+	}
+	boot_params_p->e820_entries = memmap.nr_entries;
+}
-- 
2.16.1
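For readers unfamiliar with the mechanism the patch relies on: the common
PVH file provides a __weak mem_map_via_hcall() that just BUGs, and the
Xen-specific file, when built in, supplies a strong definition that wins at
link time. Below is a minimal user-space sketch of that weak/strong symbol
pattern (illustrative only, not part of the patch; the file names, the toy
boot_params struct and the stubbed "hypercall" body are made up here):

/* common.c -- generic entry code with a weak fallback.
 * Build with override:    cc common.c xen.c -o demo && ./demo
 * Build without override: cc common.c -o demo && ./demo   (fallback fires)
 */
#include <stdio.h>
#include <stdlib.h>

struct boot_params { int e820_entries; };     /* toy stand-in */

/* Weak default: only used if no strong definition is linked in. */
__attribute__((weak)) void mem_map_via_hcall(struct boot_params *bp)
{
        (void)bp;
        fprintf(stderr, "Error: Could not find memory map\n");
        abort();
}

int main(void)
{
        struct boot_params bp = { 0 };

        mem_map_via_hcall(&bp);   /* resolves to the strong version if linked */
        printf("e820_entries = %d\n", bp.e820_entries);
        return 0;
}

/* xen.c -- hypervisor-specific strong override; stands in for the real
 * HYPERVISOR_memory_op(XENMEM_memory_map, ...) based implementation.
 */
struct boot_params { int e820_entries; };     /* must match common.c */

void mem_map_via_hcall(struct boot_params *bp)
{
        bp->e820_entries = 1;     /* pretend the "hypercall" filled one entry */
}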