[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-changelog] [xen master] pvh prep: introduce pv guest type and has_hvm_container macros
commit 6c6492780eac70db6c0b9e3a6a485052db574553 Author: Mukesh Rathor <mukesh.rathor@xxxxxxxxxx> AuthorDate: Wed Nov 13 09:30:09 2013 +0100 Commit: Jan Beulich <jbeulich@xxxxxxxx> CommitDate: Wed Nov 13 09:30:09 2013 +0100 pvh prep: introduce pv guest type and has_hvm_container macros The goal of this patch is to classify conditionals more clearly, as to whether they relate to pv guests, hvm-only guests, or guests with an "hvm container" (which will eventually include PVH). This patch introduces an enum for guest type, as well as two new macros for switching behavior on and off: is_pv_* and has_hvm_container_*. At the moment is_pv_* <=> !has_hvm_container_*. The purpose of having two is that it seems to me different to take a path because something does *not* have PV structures as to take a path because it *does* have HVM structures, even if the two happen to coincide 100% at the moment. The exact usage is occasionally a bit fuzzy though, and a judgement call just needs to be made on which is clearer. In general, a switch should use is_pv_* (or !is_pv_*) if the code in question relates directly to a PV guest. Examples include use of pv_vcpu structs or other behavior directly related to PV domains. hvm_container is more of a fuzzy concept, but in general: * Most core HVM behavior will be included in this. 
Behavior not appropriate for PVH mode will be disabled in later patches * Hypercalls related to HVM guests will *not* be included by default; functionality needed by PVH guests will be enabled in future patches * The following functionality are not considered part of the HVM container, and PVH will end up behaving like PV by default: Event channel, vtsc offset, code related to emulated timers, nested HVM, emuirq, PoD * Some features are left to implement for PVH later: vpmu, shadow mode Signed-off-by: Mukesh Rathor <mukesh.rathor@xxxxxxxxxx> Signed-off-by: George Dunlap <george.dunlap@xxxxxxxxxxxxx> Acked-by: Tim Deegan <tim@xxxxxxx> Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx> Acked-by: Keir Fraser <keir@xxxxxxx> Acked-by: Eddie Dong <eddie.dong@xxxxxxxxx> --- xen/arch/x86/acpi/suspend.c | 2 +- xen/arch/x86/cpu/mcheck/vmce.c | 6 ++-- xen/arch/x86/debug.c | 2 +- xen/arch/x86/domain.c | 54 ++++++++++++++++++------------------ xen/arch/x86/domain_page.c | 10 +++--- xen/arch/x86/domctl.c | 10 +++--- xen/arch/x86/efi/runtime.c | 4 +- xen/arch/x86/hvm/vmx/vmcs.c | 4 +- xen/arch/x86/mm.c | 6 ++-- xen/arch/x86/mm/shadow/common.c | 6 ++-- xen/arch/x86/mm/shadow/multi.c | 4 +- xen/arch/x86/physdev.c | 4 +- xen/arch/x86/traps.c | 5 ++- xen/arch/x86/x86_64/mm.c | 2 +- xen/arch/x86/x86_64/traps.c | 8 ++-- xen/common/domain.c | 2 +- xen/common/grant_table.c | 4 +- xen/common/kernel.c | 2 +- xen/include/asm-x86/domain.h | 2 +- xen/include/asm-x86/event.h | 2 +- xen/include/asm-x86/guest_access.h | 12 ++++---- xen/include/asm-x86/guest_pt.h | 4 +- xen/include/xen/sched.h | 14 +++++++-- xen/include/xen/tmem_xen.h | 2 +- 24 files changed, 90 insertions(+), 81 deletions(-) diff --git a/xen/arch/x86/acpi/suspend.c b/xen/arch/x86/acpi/suspend.c index 6fdd876..1718930 100644 --- a/xen/arch/x86/acpi/suspend.c +++ b/xen/arch/x86/acpi/suspend.c @@ -85,7 +85,7 @@ void restore_rest_processor_state(void) BUG(); /* Maybe load the debug registers. 
*/ - BUG_ON(is_hvm_vcpu(curr)); + BUG_ON(!is_pv_vcpu(curr)); if ( !is_idle_vcpu(curr) && curr->arch.debugreg[7] ) { write_debugreg(0, curr->arch.debugreg[0]); diff --git a/xen/arch/x86/cpu/mcheck/vmce.c b/xen/arch/x86/cpu/mcheck/vmce.c index af3b491..f6c35db 100644 --- a/xen/arch/x86/cpu/mcheck/vmce.c +++ b/xen/arch/x86/cpu/mcheck/vmce.c @@ -83,7 +83,7 @@ int vmce_restore_vcpu(struct vcpu *v, const struct hvm_vmce_vcpu *ctxt) { dprintk(XENLOG_G_ERR, "%s restore: unsupported MCA capabilities" " %#" PRIx64 " for d%d:v%u (supported: %#Lx)\n", - is_hvm_vcpu(v) ? "HVM" : "PV", ctxt->caps, + has_hvm_container_vcpu(v) ? "HVM" : "PV", ctxt->caps, v->domain->domain_id, v->vcpu_id, guest_mcg_cap & ~MCG_CAP_COUNT); return -EPERM; @@ -357,7 +357,7 @@ int inject_vmce(struct domain *d, int vcpu) if ( vcpu != VMCE_INJECT_BROADCAST && vcpu != v->vcpu_id ) continue; - if ( (is_hvm_domain(d) || + if ( (has_hvm_container_domain(d) || guest_has_trap_callback(d, v->vcpu_id, TRAP_machine_check)) && !test_and_set_bool(v->mce_pending) ) { @@ -439,7 +439,7 @@ int unmmap_broken_page(struct domain *d, mfn_t mfn, unsigned long gfn) if (!mfn_valid(mfn_x(mfn))) return -EINVAL; - if ( !is_hvm_domain(d) || !paging_mode_hap(d) ) + if ( !has_hvm_container_domain(d) || !paging_mode_hap(d) ) return -ENOSYS; rc = -1; diff --git a/xen/arch/x86/debug.c b/xen/arch/x86/debug.c index e67473e..3e21ca8 100644 --- a/xen/arch/x86/debug.c +++ b/xen/arch/x86/debug.c @@ -158,7 +158,7 @@ dbg_rw_guest_mem(dbgva_t addr, dbgbyte_t *buf, int len, struct domain *dp, pagecnt = min_t(long, PAGE_SIZE - (addr & ~PAGE_MASK), len); - mfn = (dp->is_hvm + mfn = (has_hvm_container_domain(dp) ? 
dbg_hvm_va2mfn(addr, dp, toaddr, &gfn) : dbg_pv_va2mfn(addr, dp, pgd3)); if ( mfn == INVALID_MFN ) diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c index b67fcb8..358616c 100644 --- a/xen/arch/x86/domain.c +++ b/xen/arch/x86/domain.c @@ -167,7 +167,7 @@ void dump_pageframe_info(struct domain *d) spin_unlock(&d->page_alloc_lock); } - if ( is_hvm_domain(d) ) + if ( has_hvm_container_domain(d) ) p2m_pod_dump_data(d); spin_lock(&d->page_alloc_lock); @@ -385,7 +385,7 @@ int vcpu_initialise(struct vcpu *v) vmce_init_vcpu(v); - if ( is_hvm_domain(d) ) + if ( has_hvm_container_domain(d) ) { rc = hvm_vcpu_initialise(v); goto done; @@ -438,7 +438,7 @@ int vcpu_initialise(struct vcpu *v) { vcpu_destroy_fpu(v); - if ( !is_hvm_domain(d) ) + if ( is_pv_domain(d) ) xfree(v->arch.pv_vcpu.trap_ctxt); } @@ -452,7 +452,7 @@ void vcpu_destroy(struct vcpu *v) vcpu_destroy_fpu(v); - if ( is_hvm_vcpu(v) ) + if ( has_hvm_container_vcpu(v) ) hvm_vcpu_destroy(v); else xfree(v->arch.pv_vcpu.trap_ctxt); @@ -464,7 +464,7 @@ int arch_domain_create(struct domain *d, unsigned int domcr_flags) int rc = -ENOMEM; d->arch.hvm_domain.hap_enabled = - is_hvm_domain(d) && + has_hvm_container_domain(d) && hvm_funcs.hap_supported && (domcr_flags & DOMCRF_hap); d->arch.hvm_domain.mem_sharing_enabled = 0; @@ -490,7 +490,7 @@ int arch_domain_create(struct domain *d, unsigned int domcr_flags) d->domain_id); } - if ( is_hvm_domain(d) ) + if ( has_hvm_container_domain(d) ) rc = create_perdomain_mapping(d, PERDOMAIN_VIRT_START, 0, NULL, NULL); else if ( is_idle_domain(d) ) rc = 0; @@ -512,7 +512,7 @@ int arch_domain_create(struct domain *d, unsigned int domcr_flags) mapcache_domain_init(d); HYPERVISOR_COMPAT_VIRT_START(d) = - is_hvm_domain(d) ? ~0u : __HYPERVISOR_COMPAT_VIRT_START; + is_pv_domain(d) ? 
__HYPERVISOR_COMPAT_VIRT_START : ~0u; if ( (rc = paging_domain_init(d, domcr_flags)) != 0 ) goto fail; @@ -554,7 +554,7 @@ int arch_domain_create(struct domain *d, unsigned int domcr_flags) goto fail; } - if ( is_hvm_domain(d) ) + if ( has_hvm_container_domain(d) ) { if ( (rc = hvm_domain_initialise(d)) != 0 ) { @@ -583,14 +583,14 @@ int arch_domain_create(struct domain *d, unsigned int domcr_flags) if ( paging_initialised ) paging_final_teardown(d); free_perdomain_mappings(d); - if ( !is_hvm_domain(d) ) + if ( is_pv_domain(d) ) free_xenheap_page(d->arch.pv_domain.gdt_ldt_l1tab); return rc; } void arch_domain_destroy(struct domain *d) { - if ( is_hvm_domain(d) ) + if ( has_hvm_container_domain(d) ) hvm_domain_destroy(d); else xfree(d->arch.pv_domain.e820); @@ -602,7 +602,7 @@ void arch_domain_destroy(struct domain *d) paging_final_teardown(d); free_perdomain_mappings(d); - if ( !is_hvm_domain(d) ) + if ( is_pv_domain(d) ) free_xenheap_page(d->arch.pv_domain.gdt_ldt_l1tab); free_xenheap_page(d->shared_info); @@ -653,7 +653,7 @@ int arch_set_info_guest( #define c(fld) (compat ? 
(c.cmp->fld) : (c.nat->fld)) flags = c(flags); - if ( !is_hvm_vcpu(v) ) + if ( is_pv_vcpu(v) ) { if ( !compat ) { @@ -698,7 +698,7 @@ int arch_set_info_guest( v->fpu_initialised = !!(flags & VGCF_I387_VALID); v->arch.flags &= ~TF_kernel_mode; - if ( (flags & VGCF_in_kernel) || is_hvm_vcpu(v)/*???*/ ) + if ( (flags & VGCF_in_kernel) || has_hvm_container_vcpu(v)/*???*/ ) v->arch.flags |= TF_kernel_mode; v->arch.vgc_flags = flags; @@ -713,7 +713,7 @@ int arch_set_info_guest( if ( !compat ) { memcpy(&v->arch.user_regs, &c.nat->user_regs, sizeof(c.nat->user_regs)); - if ( !is_hvm_vcpu(v) ) + if ( is_pv_vcpu(v) ) memcpy(v->arch.pv_vcpu.trap_ctxt, c.nat->trap_ctxt, sizeof(c.nat->trap_ctxt)); } @@ -729,7 +729,7 @@ int arch_set_info_guest( v->arch.user_regs.eflags |= 2; - if ( is_hvm_vcpu(v) ) + if ( has_hvm_container_vcpu(v) ) { hvm_set_info_guest(v); goto out; @@ -959,7 +959,7 @@ int arch_set_info_guest( int arch_vcpu_reset(struct vcpu *v) { - if ( !is_hvm_vcpu(v) ) + if ( is_pv_vcpu(v) ) { destroy_gdt(v); return vcpu_destroy_pagetables(v); @@ -1309,7 +1309,7 @@ static void update_runstate_area(struct vcpu *v) static inline int need_full_gdt(struct vcpu *v) { - return (!is_hvm_vcpu(v) && !is_idle_vcpu(v)); + return (is_pv_vcpu(v) && !is_idle_vcpu(v)); } static void __context_switch(void) @@ -1435,9 +1435,9 @@ void context_switch(struct vcpu *prev, struct vcpu *next) { __context_switch(); - if ( !is_hvm_vcpu(next) && + if ( is_pv_vcpu(next) && (is_idle_vcpu(prev) || - is_hvm_vcpu(prev) || + has_hvm_container_vcpu(prev) || is_pv_32on64_vcpu(prev) != is_pv_32on64_vcpu(next)) ) { uint64_t efer = read_efer(); @@ -1448,13 +1448,13 @@ void context_switch(struct vcpu *prev, struct vcpu *next) /* Re-enable interrupts before restoring state which may fault. 
*/ local_irq_enable(); - if ( !is_hvm_vcpu(next) ) + if ( is_pv_vcpu(next) ) { load_LDT(next); load_segments(next); } - set_cpuid_faulting(!is_hvm_vcpu(next) && + set_cpuid_faulting(is_pv_vcpu(next) && (next->domain->domain_id != 0)); } @@ -1537,7 +1537,7 @@ void hypercall_cancel_continuation(void) } else { - if ( !is_hvm_vcpu(current) ) + if ( is_pv_vcpu(current) ) regs->eip += 2; /* skip re-execute 'syscall' / 'int $xx' */ else current->arch.hvm_vcpu.hcall_preempted = 0; @@ -1574,12 +1574,12 @@ unsigned long hypercall_create_continuation( regs->eax = op; /* Ensure the hypercall trap instruction is re-executed. */ - if ( !is_hvm_vcpu(current) ) + if ( is_pv_vcpu(current) ) regs->eip -= 2; /* re-execute 'syscall' / 'int $xx' */ else current->arch.hvm_vcpu.hcall_preempted = 1; - if ( !is_hvm_vcpu(current) ? + if ( is_pv_vcpu(current) ? !is_pv_32on64_vcpu(current) : (hvm_guest_x86_mode(current) == 8) ) { @@ -1851,7 +1851,7 @@ int domain_relinquish_resources(struct domain *d) return ret; } - if ( !is_hvm_domain(d) ) + if ( is_pv_domain(d) ) { for_each_vcpu ( d, v ) { @@ -1924,7 +1924,7 @@ int domain_relinquish_resources(struct domain *d) BUG(); } - if ( is_hvm_domain(d) ) + if ( has_hvm_container_domain(d) ) hvm_domain_relinquish_resources(d); return 0; @@ -2008,7 +2008,7 @@ void vcpu_mark_events_pending(struct vcpu *v) if ( already_pending ) return; - if ( is_hvm_vcpu(v) ) + if ( has_hvm_container_vcpu(v) ) hvm_assert_evtchn_irq(v); else vcpu_kick(v); diff --git a/xen/arch/x86/domain_page.c b/xen/arch/x86/domain_page.c index bc18263..3903952 100644 --- a/xen/arch/x86/domain_page.c +++ b/xen/arch/x86/domain_page.c @@ -35,7 +35,7 @@ static inline struct vcpu *mapcache_current_vcpu(void) * then it means we are running on the idle domain's page table and must * therefore use its mapcache. 
*/ - if ( unlikely(pagetable_is_null(v->arch.guest_table)) && !is_hvm_vcpu(v) ) + if ( unlikely(pagetable_is_null(v->arch.guest_table)) && is_pv_vcpu(v) ) { /* If we really are idling, perform lazy context switch now. */ if ( (v = idle_vcpu[smp_processor_id()]) == current ) @@ -72,7 +72,7 @@ void *map_domain_page(unsigned long mfn) #endif v = mapcache_current_vcpu(); - if ( !v || is_hvm_vcpu(v) ) + if ( !v || !is_pv_vcpu(v) ) return mfn_to_virt(mfn); dcache = &v->domain->arch.pv_domain.mapcache; @@ -177,7 +177,7 @@ void unmap_domain_page(const void *ptr) ASSERT(va >= MAPCACHE_VIRT_START && va < MAPCACHE_VIRT_END); v = mapcache_current_vcpu(); - ASSERT(v && !is_hvm_vcpu(v)); + ASSERT(v && is_pv_vcpu(v)); dcache = &v->domain->arch.pv_domain.mapcache; ASSERT(dcache->inuse); @@ -244,7 +244,7 @@ int mapcache_domain_init(struct domain *d) struct mapcache_domain *dcache = &d->arch.pv_domain.mapcache; unsigned int bitmap_pages; - if ( is_hvm_domain(d) || is_idle_domain(d) ) + if ( !is_pv_domain(d) || is_idle_domain(d) ) return 0; #ifdef NDEBUG @@ -275,7 +275,7 @@ int mapcache_vcpu_init(struct vcpu *v) unsigned int ents = d->max_vcpus * MAPCACHE_VCPU_ENTRIES; unsigned int nr = PFN_UP(BITS_TO_LONGS(ents) * sizeof(long)); - if ( is_hvm_vcpu(v) || !dcache->inuse ) + if ( !is_pv_vcpu(v) || !dcache->inuse ) return 0; if ( ents > dcache->entries ) diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c index e75918a..f7e4586 100644 --- a/xen/arch/x86/domctl.c +++ b/xen/arch/x86/domctl.c @@ -800,7 +800,7 @@ long arch_do_domctl( if ( domctl->cmd == XEN_DOMCTL_get_ext_vcpucontext ) { evc->size = sizeof(*evc); - if ( !is_hvm_domain(d) ) + if ( is_pv_domain(d) ) { evc->sysenter_callback_cs = v->arch.pv_vcpu.sysenter_callback_cs; @@ -833,7 +833,7 @@ long arch_do_domctl( ret = -EINVAL; if ( evc->size < offsetof(typeof(*evc), vmce) ) goto ext_vcpucontext_out; - if ( !is_hvm_domain(d) ) + if ( is_pv_domain(d) ) { if ( !is_canonical_address(evc->sysenter_callback_eip) || 
!is_canonical_address(evc->syscall32_callback_eip) ) @@ -1246,7 +1246,7 @@ void arch_get_info_guest(struct vcpu *v, vcpu_guest_context_u c) bool_t compat = is_pv_32on64_domain(v->domain); #define c(fld) (!compat ? (c.nat->fld) : (c.cmp->fld)) - if ( is_hvm_vcpu(v) ) + if ( !is_pv_vcpu(v) ) memset(c.nat, 0, sizeof(*c.nat)); memcpy(&c.nat->fpu_ctxt, v->arch.fpu_ctxt, sizeof(c.nat->fpu_ctxt)); c(flags = v->arch.vgc_flags & ~(VGCF_i387_valid|VGCF_in_kernel)); @@ -1257,7 +1257,7 @@ void arch_get_info_guest(struct vcpu *v, vcpu_guest_context_u c) if ( !compat ) { memcpy(&c.nat->user_regs, &v->arch.user_regs, sizeof(c.nat->user_regs)); - if ( !is_hvm_vcpu(v) ) + if ( is_pv_vcpu(v) ) memcpy(c.nat->trap_ctxt, v->arch.pv_vcpu.trap_ctxt, sizeof(c.nat->trap_ctxt)); } @@ -1272,7 +1272,7 @@ void arch_get_info_guest(struct vcpu *v, vcpu_guest_context_u c) for ( i = 0; i < ARRAY_SIZE(v->arch.debugreg); ++i ) c(debugreg[i] = v->arch.debugreg[i]); - if ( is_hvm_vcpu(v) ) + if ( has_hvm_container_vcpu(v) ) { struct segment_register sreg; diff --git a/xen/arch/x86/efi/runtime.c b/xen/arch/x86/efi/runtime.c index 37bb535..d7c884b 100644 --- a/xen/arch/x86/efi/runtime.c +++ b/xen/arch/x86/efi/runtime.c @@ -52,7 +52,7 @@ unsigned long efi_rs_enter(void) /* prevent fixup_page_fault() from doing anything */ irq_enter(); - if ( !is_hvm_vcpu(current) && !is_idle_vcpu(current) ) + if ( is_pv_vcpu(current) && !is_idle_vcpu(current) ) { struct desc_ptr gdt_desc = { .limit = LAST_RESERVED_GDT_BYTE, @@ -71,7 +71,7 @@ unsigned long efi_rs_enter(void) void efi_rs_leave(unsigned long cr3) { write_cr3(cr3); - if ( !is_hvm_vcpu(current) && !is_idle_vcpu(current) ) + if ( is_pv_vcpu(current) && !is_idle_vcpu(current) ) { struct desc_ptr gdt_desc = { .limit = LAST_RESERVED_GDT_BYTE, diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c index 19650ce..2f7c238 100644 --- a/xen/arch/x86/hvm/vmx/vmcs.c +++ b/xen/arch/x86/hvm/vmx/vmcs.c @@ -659,7 +659,7 @@ void vmx_vmcs_exit(struct vcpu *v) 
{ /* Don't confuse vmx_do_resume (for @v or @current!) */ vmx_clear_vmcs(v); - if ( is_hvm_vcpu(current) ) + if ( has_hvm_container_vcpu(current) ) vmx_load_vmcs(current); spin_unlock(&v->arch.hvm_vmx.vmcs_lock); @@ -1488,7 +1488,7 @@ static void vmcs_dump(unsigned char ch) for_each_domain ( d ) { - if ( !is_hvm_domain(d) ) + if ( !has_hvm_container_domain(d) ) continue; printk("\n>>> Domain %d <<<\n", d->domain_id); for_each_vcpu ( d, v ) diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c index 43aaceb..9621e22 100644 --- a/xen/arch/x86/mm.c +++ b/xen/arch/x86/mm.c @@ -181,7 +181,7 @@ static uint32_t base_disallow_mask; (rangeset_is_empty((d)->iomem_caps) && \ rangeset_is_empty((d)->arch.ioport_caps) && \ !has_arch_pdevs(d) && \ - !is_hvm_domain(d)) ? \ + is_pv_domain(d)) ? \ L1_DISALLOW_MASK : (L1_DISALLOW_MASK & ~PAGE_CACHE_ATTRS)) static void __init init_frametable_chunk(void *start, void *end) @@ -433,7 +433,7 @@ int page_is_ram_type(unsigned long mfn, unsigned long mem_type) unsigned long domain_get_maximum_gpfn(struct domain *d) { - if ( is_hvm_domain(d) ) + if ( has_hvm_container_domain(d) ) return p2m_get_hostp2m(d)->max_mapped_pfn; /* NB. PV guests specify nr_pfns rather than max_pfn so we adjust here. */ return (arch_get_max_pfn(d) ?: 1) - 1; @@ -2381,7 +2381,7 @@ static int __get_page_type(struct page_info *page, unsigned long type, { /* Special pages should not be accessible from devices. 
*/ struct domain *d = page_get_owner(page); - if ( d && !is_hvm_domain(d) && unlikely(need_iommu(d)) ) + if ( d && is_pv_domain(d) && unlikely(need_iommu(d)) ) { if ( (x & PGT_type_mask) == PGT_writable_page ) iommu_unmap_page(d, mfn_to_gmfn(d, page_to_mfn(page))); diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c index adffa06..0bfa595 100644 --- a/xen/arch/x86/mm/shadow/common.c +++ b/xen/arch/x86/mm/shadow/common.c @@ -367,7 +367,7 @@ const struct x86_emulate_ops *shadow_init_emulation( sh_ctxt->ctxt.regs = regs; sh_ctxt->ctxt.force_writeback = 0; - if ( !is_hvm_vcpu(v) ) + if ( is_pv_vcpu(v) ) { sh_ctxt->ctxt.addr_size = sh_ctxt->ctxt.sp_size = BITS_PER_LONG; return &pv_shadow_emulator_ops; @@ -964,7 +964,7 @@ int sh_unsync(struct vcpu *v, mfn_t gmfn) if ( pg->shadow_flags & ((SHF_page_type_mask & ~SHF_L1_ANY) | SHF_out_of_sync) || sh_page_has_multiple_shadows(pg) - || !is_hvm_domain(v->domain) + || is_pv_domain(v->domain) || !v->domain->arch.paging.shadow.oos_active ) return 0; @@ -2753,7 +2753,7 @@ static void sh_update_paging_modes(struct vcpu *v) if ( v->arch.paging.mode ) v->arch.paging.mode->shadow.detach_old_tables(v); - if ( !is_hvm_domain(d) ) + if ( is_pv_domain(d) ) { /// /// PV guest diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c index 3fed0b6..3d35537 100644 --- a/xen/arch/x86/mm/shadow/multi.c +++ b/xen/arch/x86/mm/shadow/multi.c @@ -712,7 +712,7 @@ _sh_propagate(struct vcpu *v, // supervisor permissions, making the guest's _PAGE_USER bit irrelevant. // It is always shadowed as present... 
if ( (GUEST_PAGING_LEVELS == 4) && !is_pv_32on64_domain(d) - && !is_hvm_domain(d) ) + && is_pv_domain(d) ) { sflags |= _PAGE_USER; } @@ -3922,7 +3922,7 @@ sh_update_cr3(struct vcpu *v, int do_locking) #endif /* Don't do anything on an uninitialised vcpu */ - if ( !is_hvm_domain(d) && !v->is_initialised ) + if ( is_pv_domain(d) && !v->is_initialised ) { ASSERT(v->arch.cr3 == 0); return; diff --git a/xen/arch/x86/physdev.c b/xen/arch/x86/physdev.c index 4835ed7..dab6213 100644 --- a/xen/arch/x86/physdev.c +++ b/xen/arch/x86/physdev.c @@ -310,10 +310,10 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg) spin_unlock(&v->domain->event_lock); break; } - if ( !is_hvm_domain(v->domain) && + if ( is_pv_domain(v->domain) && v->domain->arch.pv_domain.auto_unmask ) evtchn_unmask(pirq->evtchn); - if ( !is_hvm_domain(v->domain) || + if ( is_pv_domain(v->domain) || domain_pirq_to_irq(v->domain, eoi.irq) > 0 ) pirq_guest_eoi(pirq); if ( is_hvm_domain(v->domain) && diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c index 0e3c6e3..26ae722 100644 --- a/xen/arch/x86/traps.c +++ b/xen/arch/x86/traps.c @@ -120,6 +120,7 @@ static void show_guest_stack(struct vcpu *v, struct cpu_user_regs *regs) unsigned long *stack, addr; unsigned long mask = STACK_SIZE; + /* Avoid HVM as we don't know what the stack looks like. 
*/ if ( is_hvm_vcpu(v) ) return; @@ -547,7 +548,7 @@ static inline void do_trap( } if ( ((trapnr == TRAP_copro_error) || (trapnr == TRAP_simd_error)) && - is_hvm_vcpu(curr) && curr->arch.hvm_vcpu.fpu_exception_callback ) + has_hvm_container_vcpu(curr) && curr->arch.hvm_vcpu.fpu_exception_callback ) { curr->arch.hvm_vcpu.fpu_exception_callback( curr->arch.hvm_vcpu.fpu_exception_callback_arg, regs); @@ -702,7 +703,7 @@ int cpuid_hypervisor_leaves( uint32_t idx, uint32_t sub_idx, *ebx = 0x40000200; *ecx = 0; /* Features 1 */ *edx = 0; /* Features 2 */ - if ( !is_hvm_vcpu(current) ) + if ( is_pv_vcpu(current) ) *ecx |= XEN_CPUID_FEAT1_MMU_PT_UPDATE_PRESERVE_AD; break; diff --git a/xen/arch/x86/x86_64/mm.c b/xen/arch/x86/x86_64/mm.c index 2bdbad0..4a3b3f1 100644 --- a/xen/arch/x86/x86_64/mm.c +++ b/xen/arch/x86/x86_64/mm.c @@ -73,7 +73,7 @@ void *do_page_walk(struct vcpu *v, unsigned long addr) l2_pgentry_t l2e, *l2t; l1_pgentry_t l1e, *l1t; - if ( is_hvm_vcpu(v) || !is_canonical_address(addr) ) + if ( !is_pv_vcpu(v) || !is_canonical_address(addr) ) return NULL; l4t = map_domain_page(mfn); diff --git a/xen/arch/x86/x86_64/traps.c b/xen/arch/x86/x86_64/traps.c index ae93539..8e6a7c1 100644 --- a/xen/arch/x86/x86_64/traps.c +++ b/xen/arch/x86/x86_64/traps.c @@ -86,7 +86,7 @@ void show_registers(struct cpu_user_regs *regs) enum context context; struct vcpu *v = current; - if ( is_hvm_vcpu(v) && guest_mode(regs) ) + if ( has_hvm_container_vcpu(v) && guest_mode(regs) ) { struct segment_register sreg; context = CTXT_hvm_guest; @@ -147,8 +147,8 @@ void vcpu_show_registers(const struct vcpu *v) const struct cpu_user_regs *regs = &v->arch.user_regs; unsigned long crs[8]; - /* No need to handle HVM for now. 
*/ - if ( is_hvm_vcpu(v) ) + /* Only handle PV guests for now */ + if ( !is_pv_vcpu(v) ) return; crs[0] = v->arch.pv_vcpu.ctrlreg[0]; @@ -624,7 +624,7 @@ static void hypercall_page_initialise_ring3_kernel(void *hypercall_page) void hypercall_page_initialise(struct domain *d, void *hypercall_page) { memset(hypercall_page, 0xCC, PAGE_SIZE); - if ( is_hvm_domain(d) ) + if ( has_hvm_container_domain(d) ) hvm_hypercall_page_initialise(d, hypercall_page); else if ( !is_pv_32bit_domain(d) ) hypercall_page_initialise_ring3_kernel(hypercall_page); diff --git a/xen/common/domain.c b/xen/common/domain.c index 63c47e8..aacf9c0 100644 --- a/xen/common/domain.c +++ b/xen/common/domain.c @@ -238,7 +238,7 @@ struct domain *domain_create( goto fail; if ( domcr_flags & DOMCRF_hvm ) - d->is_hvm = 1; + d->guest_type = guest_type_hvm; if ( domid == 0 ) { diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c index 21c6a14..107b000 100644 --- a/xen/common/grant_table.c +++ b/xen/common/grant_table.c @@ -721,7 +721,7 @@ __gnttab_map_grant_ref( double_gt_lock(lgt, rgt); - if ( !is_hvm_domain(ld) && need_iommu(ld) ) + if ( is_pv_domain(ld) && need_iommu(ld) ) { unsigned int wrc, rdc; int err = 0; @@ -931,7 +931,7 @@ __gnttab_unmap_common( act->pin -= GNTPIN_hstw_inc; } - if ( !is_hvm_domain(ld) && need_iommu(ld) ) + if ( is_pv_domain(ld) && need_iommu(ld) ) { unsigned int wrc, rdc; int err = 0; diff --git a/xen/common/kernel.c b/xen/common/kernel.c index 4ca50c4..97d9050 100644 --- a/xen/common/kernel.c +++ b/xen/common/kernel.c @@ -306,7 +306,7 @@ DO(xen_version)(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg) if ( current->domain == dom0 ) fi.submap |= 1U << XENFEAT_dom0; #ifdef CONFIG_X86 - if ( !is_hvm_vcpu(current) ) + if ( is_pv_vcpu(current) ) fi.submap |= (1U << XENFEAT_mmu_pt_update_preserve_ad) | (1U << XENFEAT_highmem_assist) | (1U << XENFEAT_gnttab_map_avail_bits); diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h index e4da850..9b20268 100644 --- 
a/xen/include/asm-x86/domain.h +++ b/xen/include/asm-x86/domain.h @@ -16,7 +16,7 @@ #define is_pv_32on64_domain(d) (is_pv_32bit_domain(d)) #define is_pv_32on64_vcpu(v) (is_pv_32on64_domain((v)->domain)) -#define is_hvm_pv_evtchn_domain(d) (is_hvm_domain(d) && \ +#define is_hvm_pv_evtchn_domain(d) (has_hvm_container_domain(d) && \ d->arch.hvm_domain.irq.callback_via_type == HVMIRQ_callback_vector) #define is_hvm_pv_evtchn_vcpu(v) (is_hvm_pv_evtchn_domain(v->domain)) diff --git a/xen/include/asm-x86/event.h b/xen/include/asm-x86/event.h index 7edeb5b..a82062e 100644 --- a/xen/include/asm-x86/event.h +++ b/xen/include/asm-x86/event.h @@ -23,7 +23,7 @@ int hvm_local_events_need_delivery(struct vcpu *v); static inline int local_events_need_delivery(void) { struct vcpu *v = current; - return (is_hvm_vcpu(v) ? hvm_local_events_need_delivery(v) : + return (has_hvm_container_vcpu(v) ? hvm_local_events_need_delivery(v) : (vcpu_info(v, evtchn_upcall_pending) && !vcpu_info(v, evtchn_upcall_mask))); } diff --git a/xen/include/asm-x86/guest_access.h b/xen/include/asm-x86/guest_access.h index ca700c9..88edb3f 100644 --- a/xen/include/asm-x86/guest_access.h +++ b/xen/include/asm-x86/guest_access.h @@ -14,27 +14,27 @@ /* Raw access functions: no type checking. */ #define raw_copy_to_guest(dst, src, len) \ - (is_hvm_vcpu(current) ? \ + (has_hvm_container_vcpu(current) ? \ copy_to_user_hvm((dst), (src), (len)) : \ copy_to_user((dst), (src), (len))) #define raw_copy_from_guest(dst, src, len) \ - (is_hvm_vcpu(current) ? \ + (has_hvm_container_vcpu(current) ? \ copy_from_user_hvm((dst), (src), (len)) : \ copy_from_user((dst), (src), (len))) #define raw_clear_guest(dst, len) \ - (is_hvm_vcpu(current) ? \ + (has_hvm_container_vcpu(current) ? \ clear_user_hvm((dst), (len)) : \ clear_user((dst), (len))) #define __raw_copy_to_guest(dst, src, len) \ - (is_hvm_vcpu(current) ? \ + (has_hvm_container_vcpu(current) ? 
\ copy_to_user_hvm((dst), (src), (len)) : \ __copy_to_user((dst), (src), (len))) #define __raw_copy_from_guest(dst, src, len) \ - (is_hvm_vcpu(current) ? \ + (has_hvm_container_vcpu(current) ? \ copy_from_user_hvm((dst), (src), (len)) : \ __copy_from_user((dst), (src), (len))) #define __raw_clear_guest(dst, len) \ - (is_hvm_vcpu(current) ? \ + (has_hvm_container_vcpu(current) ? \ clear_user_hvm((dst), (len)) : \ clear_user((dst), (len))) diff --git a/xen/include/asm-x86/guest_pt.h b/xen/include/asm-x86/guest_pt.h index b62bc6a..d2a8250 100644 --- a/xen/include/asm-x86/guest_pt.h +++ b/xen/include/asm-x86/guest_pt.h @@ -196,7 +196,7 @@ guest_supports_superpages(struct vcpu *v) /* The _PAGE_PSE bit must be honoured in HVM guests, whenever * CR4.PSE is set or the guest is in PAE or long mode. * It's also used in the dummy PT for vcpus with CR4.PG cleared. */ - return (!is_hvm_vcpu(v) + return (is_pv_vcpu(v) ? opt_allow_superpage : (GUEST_PAGING_LEVELS != 2 || !hvm_paging_enabled(v) @@ -214,7 +214,7 @@ guest_supports_nx(struct vcpu *v) { if ( GUEST_PAGING_LEVELS == 2 || !cpu_has_nx ) return 0; - if ( !is_hvm_vcpu(v) ) + if ( is_pv_vcpu(v) ) return cpu_has_nx; return hvm_nx_enabled(v); } diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h index 2397537..a9dd15f 100644 --- a/xen/include/xen/sched.h +++ b/xen/include/xen/sched.h @@ -259,6 +259,10 @@ struct mem_event_per_domain struct evtchn_port_ops; +enum guest_type { + guest_type_pv, guest_type_hvm +}; + struct domain { domid_t domain_id; @@ -311,8 +315,8 @@ struct domain struct rangeset *iomem_caps; struct rangeset *irq_caps; - /* Is this an HVM guest? */ - bool_t is_hvm; + enum guest_type guest_type; + #ifdef HAS_PASSTHROUGH /* Does this guest need iommu mappings? 
*/ bool_t need_iommu; @@ -772,8 +776,12 @@ void watchdog_domain_destroy(struct domain *d); #define VM_ASSIST(_d,_t) (test_bit((_t), &(_d)->vm_assist)) -#define is_hvm_domain(d) ((d)->is_hvm) +#define is_pv_domain(d) ((d)->guest_type == guest_type_pv) +#define is_pv_vcpu(v) (is_pv_domain((v)->domain)) +#define is_hvm_domain(d) ((d)->guest_type == guest_type_hvm) #define is_hvm_vcpu(v) (is_hvm_domain(v->domain)) +#define has_hvm_container_domain(d) ((d)->guest_type != guest_type_pv) +#define has_hvm_container_vcpu(v) (has_hvm_container_domain((v)->domain)) #define is_pinned_vcpu(v) ((v)->domain->is_pinned || \ cpumask_weight((v)->cpu_affinity) == 1) #ifdef HAS_PASSTHROUGH diff --git a/xen/include/xen/tmem_xen.h b/xen/include/xen/tmem_xen.h index ad1ddd5..9fb7446 100644 --- a/xen/include/xen/tmem_xen.h +++ b/xen/include/xen/tmem_xen.h @@ -442,7 +442,7 @@ typedef XEN_GUEST_HANDLE_PARAM(char) tmem_cli_va_param_t; static inline int tmh_get_tmemop_from_client(tmem_op_t *op, tmem_cli_op_t uops) { #ifdef CONFIG_COMPAT - if ( is_hvm_vcpu(current) ? + if ( has_hvm_container_vcpu(current) ? hvm_guest_x86_mode(current) != 8 : is_pv_32on64_vcpu(current) ) { -- generated by git-patchbot for /home/xen/git/xen.git#master _______________________________________________ Xen-changelog mailing list Xen-changelog@xxxxxxxxxxxxx http://lists.xensource.com/xen-changelog
|
Lists.xenproject.org is hosted with RackSpace, monitoring our servers 24x7x365 and backed by RackSpace's Fanatical Support®. |