[Xen-devel] [PATCH v3.1 14/15] xen/x86: hack to setup PVHv2 Dom0 CPUs
Initialize the Dom0 BSP/APs and set up the memory and I/O permissions.

Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
---
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
DO NOT APPLY. The logic used to set up the CPUID leaves is clearly
lacking. This patch will be rebased on top of Andrew's CPUID work,
which will move CPUID setup from libxc into Xen. For the time being
this is needed in order to boot a PVHv2 Dom0 and test the rest of the
patches.
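For reference, the hard-coded segment access-rights values used below
(cs_ar = 0xc9b, ds_ar/ss_ar = 0xc93, tr_ar = 0x8b) follow the _ar
layout described in public/hvm/hvm_vcpu.h: type in bits 0-3, S in bit
4, DPL in bits 5-6, P in bit 7, and AVL/L/DB/G in bits 8-11. The
standalone decoder below is only an illustrative sketch (not part of
this patch) of what those values encode:

#include <stdio.h>

/*
 * Decode a segment attribute value in the layout used by the _ar
 * fields of struct vcpu_hvm_x86_32 (see public/hvm/hvm_vcpu.h):
 * bits 0-3: type, bit 4: S, bits 5-6: DPL, bit 7: P,
 * bit 8: AVL, bit 9: L, bit 10: D/B, bit 11: G.
 */
static void decode_ar(const char *name, unsigned int ar)
{
    printf("%s = %#05x: type=%#x %s dpl=%u %s %s %s\n",
           name, ar,
           ar & 0xfu,                        /* segment type nibble */
           (ar & (1u << 4)) ? "code/data" : "system",
           (ar >> 5) & 0x3u,                 /* descriptor privilege level */
           (ar & (1u << 7)) ? "present" : "not-present",
           (ar & (1u << 10)) ? "32-bit" : "16-bit",
           (ar & (1u << 11)) ? "page-granular" : "byte-granular");
}

int main(void)
{
    decode_ar("cs_ar", 0xc9b); /* execute/read code segment, accessed */
    decode_ar("ds_ar", 0xc93); /* read/write data segment, accessed */
    decode_ar("ss_ar", 0xc93);
    decode_ar("tr_ar", 0x8b);  /* busy 32-bit TSS; D/B and G read as 0 */
    return 0;
}

Together with cr0 = PE | ET, flat ~0u limits and zero bases, this
starts the BSP in flat 32-bit protected mode with paging disabled and
the start_info frame address in %ebx, which is what the PVH entry
point expects.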
---
 xen/arch/x86/domain_build.c | 97 +++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 97 insertions(+)

diff --git a/xen/arch/x86/domain_build.c b/xen/arch/x86/domain_build.c
index 168be62..1ebc21f 100644
--- a/xen/arch/x86/domain_build.c
+++ b/xen/arch/x86/domain_build.c
@@ -40,6 +40,7 @@
 #include <public/version.h>
 #include <public/arch-x86/hvm/start_info.h>
+#include <public/hvm/hvm_vcpu.h>
 
 static long __initdata dom0_nrpages;
 static long __initdata dom0_min_nrpages;
@@ -2024,6 +2025,93 @@ out:
     return rc;
 }
 
+static int __init hvm_setup_cpus(struct domain *d, paddr_t entry,
+                                 paddr_t start_info)
+{
+    vcpu_hvm_context_t cpu_ctx;
+    struct vcpu *v = d->vcpu[0];
+    int cpu, i, rc;
+    struct {
+        uint32_t index;
+        uint32_t count;
+    } cpuid_leaves[] = {
+        {0, XEN_CPUID_INPUT_UNUSED},
+        {1, XEN_CPUID_INPUT_UNUSED},
+        {2, XEN_CPUID_INPUT_UNUSED},
+        {4, 0},
+        {4, 1},
+        {4, 2},
+        {4, 3},
+        {4, 4},
+        {7, 0},
+        {0xa, XEN_CPUID_INPUT_UNUSED},
+        {0xd, 0},
+        {0x80000000, XEN_CPUID_INPUT_UNUSED},
+        {0x80000001, XEN_CPUID_INPUT_UNUSED},
+        {0x80000002, XEN_CPUID_INPUT_UNUSED},
+        {0x80000003, XEN_CPUID_INPUT_UNUSED},
+        {0x80000004, XEN_CPUID_INPUT_UNUSED},
+        {0x80000005, XEN_CPUID_INPUT_UNUSED},
+        {0x80000006, XEN_CPUID_INPUT_UNUSED},
+        {0x80000007, XEN_CPUID_INPUT_UNUSED},
+        {0x80000008, XEN_CPUID_INPUT_UNUSED},
+    };
+
+    cpu = v->processor;
+    for ( i = 1; i < d->max_vcpus; i++ )
+    {
+        cpu = cpumask_cycle(cpu, &dom0_cpus);
+        setup_dom0_vcpu(d, i, cpu);
+    }
+
+    memset(&cpu_ctx, 0, sizeof(cpu_ctx));
+
+    cpu_ctx.mode = VCPU_HVM_MODE_32B;
+
+    cpu_ctx.cpu_regs.x86_32.ebx = start_info;
+    cpu_ctx.cpu_regs.x86_32.eip = entry;
+    cpu_ctx.cpu_regs.x86_32.cr0 = X86_CR0_PE | X86_CR0_ET;
+
+    cpu_ctx.cpu_regs.x86_32.cs_limit = ~0u;
+    cpu_ctx.cpu_regs.x86_32.ds_limit = ~0u;
+    cpu_ctx.cpu_regs.x86_32.ss_limit = ~0u;
+    cpu_ctx.cpu_regs.x86_32.tr_limit = 0x67;
+    cpu_ctx.cpu_regs.x86_32.cs_ar = 0xc9b;
+    cpu_ctx.cpu_regs.x86_32.ds_ar = 0xc93;
+    cpu_ctx.cpu_regs.x86_32.ss_ar = 0xc93;
+    cpu_ctx.cpu_regs.x86_32.tr_ar = 0x8b;
+
+    rc = arch_set_info_hvm_guest(v, &cpu_ctx);
+    if ( rc )
+    {
+        printk("Unable to setup Dom0 BSP context: %d\n", rc);
+        return rc;
+    }
+
+    for ( i = 0; i < ARRAY_SIZE(cpuid_leaves); i++ )
+    {
+        d->arch.cpuids[i].input[0] = cpuid_leaves[i].index;
+        d->arch.cpuids[i].input[1] = cpuid_leaves[i].count;
+        cpuid_count(d->arch.cpuids[i].input[0], d->arch.cpuids[i].input[1],
+                    &d->arch.cpuids[i].eax, &d->arch.cpuids[i].ebx,
+                    &d->arch.cpuids[i].ecx, &d->arch.cpuids[i].edx);
+        /* XXX: we need to do much more filtering here. */
+        if ( d->arch.cpuids[i].input[0] == 1 )
+            d->arch.cpuids[i].ecx &= ~cpufeat_mask(X86_FEATURE_VMX);
+    }
+
+    rc = setup_permissions(d);
+    if ( rc )
+    {
+        printk("Unable to setup Dom0 permissions: %d\n", rc);
+        return rc;
+    }
+
+    update_domain_wallclock_time(d);
+
+    return 0;
+}
+
 static int __init construct_dom0_hvm(struct domain *d, const module_t *image,
                                      unsigned long image_headroom,
                                      module_t *initrd,
@@ -2056,6 +2144,15 @@ static int __init construct_dom0_hvm(struct domain *d, const module_t *image,
         return rc;
     }
 
+    rc = hvm_setup_cpus(d, entry, start_info);
+    if ( rc )
+    {
+        printk("Failed to setup Dom0 CPUs: %d\n", rc);
+        return rc;
+    }
+
+    clear_bit(_VPF_down, &d->vcpu[0]->pause_flags);
+
     return 0;
 }
-- 
2.7.4 (Apple Git-66)