[Xen-devel] [PATCH 2/3] x86: drop is_pv_32on64_domain()
... as being identical to is_pv_32bit_domain() after the x86-32 removal.
In a few cases this includes no longer open-coding is_pv_32bit_vcpu().

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>

--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -367,7 +367,7 @@ int switch_native(struct domain *d)
     if ( !may_switch_mode(d) )
         return -EACCES;
-    if ( !is_pv_32on64_domain(d) )
+    if ( !is_pv_32bit_domain(d) )
         return 0;

     d->arch.is_32bit_pv = d->arch.has_32bit_shinfo = 0;

@@ -392,7 +392,7 @@ int switch_compat(struct domain *d)
     if ( !may_switch_mode(d) )
         return -EACCES;
-    if ( is_pv_32on64_domain(d) )
+    if ( is_pv_32bit_domain(d) )
         return 0;

     d->arch.is_32bit_pv = d->arch.has_32bit_shinfo = 1;

@@ -481,7 +481,7 @@ int vcpu_initialise(struct vcpu *v)

         v->arch.pv_vcpu.ctrlreg[4] = real_cr4_to_pv_guest_cr4(mmu_cr4_features);

-        rc = is_pv_32on64_domain(d) ? setup_compat_l4(v) : 0;
+        rc = is_pv_32bit_domain(d) ? setup_compat_l4(v) : 0;
  done:
     if ( rc )
     {
@@ -689,7 +689,7 @@ unsigned long pv_guest_cr4_fixup(const s
         hv_cr4_mask = ~X86_CR4_TSD;
     if ( cpu_has_de )
         hv_cr4_mask &= ~X86_CR4_DE;
-    if ( cpu_has_fsgsbase && !is_pv_32bit_domain(v->domain) )
+    if ( cpu_has_fsgsbase && !is_pv_32bit_vcpu(v) )
         hv_cr4_mask &= ~X86_CR4_FSGSBASE;
     if ( cpu_has_xsave )
         hv_cr4_mask &= ~X86_CR4_OSXSAVE;
@@ -721,7 +721,7 @@ int arch_set_info_guest(

     /* The context is a compat-mode one if the target domain is compat-mode;
      * we expect the tools to DTRT even in compat-mode callers. */
-    compat = is_pv_32on64_domain(d);
+    compat = is_pv_32bit_domain(d);

 #define c(fld) (compat ? (c.cmp->fld) : (c.nat->fld))
     flags = c(flags);
@@ -1195,7 +1195,7 @@ static void load_segments(struct vcpu *n
             all_segs_okay &= loadsegment(gs, uregs->gs);
     }

-    if ( !is_pv_32on64_domain(n->domain) )
+    if ( !is_pv_32bit_vcpu(n) )
     {
         /* This can only be non-zero if selector is NULL. */
         if ( n->arch.pv_vcpu.fs_base )
@@ -1224,7 +1224,7 @@ static void load_segments(struct vcpu *n
             (unsigned long *)pv->kernel_sp;
         unsigned long cs_and_mask, rflags;

-        if ( is_pv_32on64_domain(n->domain) )
+        if ( is_pv_32bit_vcpu(n) )
         {
             unsigned int *esp = ring_1(regs) ?
                                 (unsigned int *)regs->rsp :
@@ -1340,7 +1340,7 @@ static void save_segments(struct vcpu *v
     if ( regs->es )
         dirty_segment_mask |= DIRTY_ES;

-    if ( regs->fs || is_pv_32on64_domain(v->domain) )
+    if ( regs->fs || is_pv_32bit_vcpu(v) )
     {
         dirty_segment_mask |= DIRTY_FS;
         v->arch.pv_vcpu.fs_base = 0; /* != 0 selector kills fs_base */
@@ -1350,7 +1350,7 @@ static void save_segments(struct vcpu *v
         dirty_segment_mask |= DIRTY_FS_BASE;
     }

-    if ( regs->gs || is_pv_32on64_domain(v->domain) )
+    if ( regs->gs || is_pv_32bit_vcpu(v) )
     {
         dirty_segment_mask |= DIRTY_GS;
         v->arch.pv_vcpu.gs_base_user = 0; /* != 0 selector kills gs_base_user */
@@ -1483,8 +1483,8 @@ static void __context_switch(void)

     psr_ctxt_switch_to(nd);

-    gdt = !is_pv_32on64_domain(nd) ? per_cpu(gdt_table, cpu) :
-                                     per_cpu(compat_gdt_table, cpu);
+    gdt = !is_pv_32bit_domain(nd) ? per_cpu(gdt_table, cpu) :
+                                    per_cpu(compat_gdt_table, cpu);
     if ( need_full_gdt(nd) )
     {
         unsigned long mfn = virt_to_mfn(gdt);
@@ -1568,7 +1568,7 @@ void context_switch(struct vcpu *prev, s
     if ( is_pv_domain(nextd) &&
          (is_idle_domain(prevd) ||
           has_hvm_container_domain(prevd) ||
-          is_pv_32on64_domain(prevd) != is_pv_32on64_domain(nextd)) )
+          is_pv_32bit_domain(prevd) != is_pv_32bit_domain(nextd)) )
     {
         uint64_t efer = read_efer();
         if ( !(efer & EFER_SCE) )
--- a/xen/arch/x86/domain_build.c
+++ b/xen/arch/x86/domain_build.c
@@ -293,7 +293,7 @@ static unsigned long __init compute_dom0
     avail -= (d->max_vcpus - 1UL)
              << get_order_from_bytes(sizeof(struct vcpu));
     /* ...and compat_l4's, if needed. */
-    if ( is_pv_32on64_domain(d) )
+    if ( is_pv_32bit_domain(d) )
         avail -= d->max_vcpus - 1;

     /* Reserve memory for iommu_dom0_init() (rough estimate). */
@@ -608,7 +608,7 @@ static __init void dom0_update_physmap(s
         BUG_ON(rc);
         return;
     }
-    if ( !is_pv_32on64_domain(d) )
+    if ( !is_pv_32bit_domain(d) )
         ((unsigned long *)vphysmap_s)[pfn] = mfn;
     else
         ((unsigned int *)vphysmap_s)[pfn] = mfn;
@@ -718,7 +718,7 @@ static __init void mark_pv_pt_pages_rdon

         /* Top-level p.t. is pinned. */
         if ( (page->u.inuse.type_info & PGT_type_mask) ==
-             (!is_pv_32on64_domain(d) ?
+             (!is_pv_32bit_domain(d) ?
               PGT_l4_page_table : PGT_l3_page_table) )
         {
             page->count_info += 1;
@@ -1048,7 +1048,7 @@ int __init construct_dom0(
         vinitrd_end    = vinitrd_start + initrd_len;
         vphysmap_start = round_pgup(vinitrd_end);
     }
-    vphysmap_end     = vphysmap_start + (nr_pages * (!is_pv_32on64_domain(d) ?
+    vphysmap_end     = vphysmap_start + (nr_pages * (!is_pv_32bit_domain(d) ?
                                                      sizeof(unsigned long) :
                                                      sizeof(unsigned int)));
     if ( parms.p2m_base != UNSET_ADDR )
@@ -1076,9 +1076,9 @@ int __init construct_dom0(
 #define NR(_l,_h,_s) \
     (((((_h) + ((1UL<<(_s))-1)) & ~((1UL<<(_s))-1)) - \
        ((_l) & ~((1UL<<(_s))-1))) >> (_s))
-    if ( (!is_pv_32on64_domain(d) +                    /* # L4 */
+    if ( (!is_pv_32bit_domain(d) +                     /* # L4 */
           NR(v_start, v_end, L4_PAGETABLE_SHIFT) +     /* # L3 */
-          (!is_pv_32on64_domain(d) ?
+          (!is_pv_32bit_domain(d) ?
            NR(v_start, v_end, L3_PAGETABLE_SHIFT) :    /* # L2 */
            4) +                                        /* # compat L2 */
           NR(v_start, v_end, L2_PAGETABLE_SHIFT))      /* # L1 */
@@ -1176,7 +1176,7 @@ int __init construct_dom0(
         mpt_alloc -= PAGE_ALIGN(initrd_len);

     /* Overlap with Xen protected area? */
-    if ( !is_pv_32on64_domain(d) ?
+    if ( !is_pv_32bit_domain(d) ?
          ((v_start < HYPERVISOR_VIRT_END) &&
           (v_end > HYPERVISOR_VIRT_START)) :
          (v_end > HYPERVISOR_COMPAT_VIRT_START(d)) )
@@ -1186,14 +1186,14 @@ int __init construct_dom0(
         goto out;
     }

-    if ( is_pv_32on64_domain(d) )
+    if ( is_pv_32bit_domain(d) )
     {
         v->arch.pv_vcpu.failsafe_callback_cs = FLAT_COMPAT_KERNEL_CS;
         v->arch.pv_vcpu.event_callback_cs    = FLAT_COMPAT_KERNEL_CS;
     }

     /* WARNING: The new domain must have its 'processor' field filled in! */
-    if ( !is_pv_32on64_domain(d) )
+    if ( !is_pv_32bit_domain(d) )
     {
         maddr_to_page(mpt_alloc)->u.inuse.type_info = PGT_l4_page_table;
         l4start = l4tab = __va(mpt_alloc); mpt_alloc += PAGE_SIZE;
@@ -1211,7 +1211,7 @@ int __init construct_dom0(
     clear_page(l4tab);
     init_guest_l4_table(l4tab, d, 0);
     v->arch.guest_table = pagetable_from_paddr(__pa(l4start));
-    if ( is_pv_32on64_domain(d) )
+    if ( is_pv_32bit_domain(d) )
         v->arch.guest_table_user = v->arch.guest_table;

     l4tab += l4_table_offset(v_start);
@@ -1257,7 +1257,7 @@ int __init construct_dom0(
             mfn = pfn++;
         else
             mfn = initrd_mfn++;
-        *l1tab = l1e_from_pfn(mfn, (!is_pv_32on64_domain(d) ?
+        *l1tab = l1e_from_pfn(mfn, (!is_pv_32bit_domain(d) ?
                                     L1_PROT : COMPAT_L1_PROT));
         l1tab++;

@@ -1270,7 +1270,7 @@ int __init construct_dom0(
         }
     }

-    if ( is_pv_32on64_domain(d) )
+    if ( is_pv_32bit_domain(d) )
     {
         /* Ensure the first four L3 entries are all populated. */
         for ( i = 0, l3tab = l3start; i < 4; ++i, ++l3tab )
@@ -1477,7 +1477,7 @@ int __init construct_dom0(
     if ( is_pvh_domain(d) )
         si->shared_info = shared_info_paddr;

-    if ( is_pv_32on64_domain(d) )
+    if ( is_pv_32bit_domain(d) )
         xlat_start_info(si, XLAT_start_info_console_dom0);

     /* Return to idle domain's page tables. */
@@ -1499,10 +1499,10 @@ int __init construct_dom0(
      */
     regs = &v->arch.user_regs;
     regs->ds = regs->es = regs->fs = regs->gs =
-        !is_pv_32on64_domain(d) ? FLAT_KERNEL_DS : FLAT_COMPAT_KERNEL_DS;
-    regs->ss = (!is_pv_32on64_domain(d) ?
+        !is_pv_32bit_domain(d) ? FLAT_KERNEL_DS : FLAT_COMPAT_KERNEL_DS;
+    regs->ss = (!is_pv_32bit_domain(d) ?
                 FLAT_KERNEL_SS : FLAT_COMPAT_KERNEL_SS);
-    regs->cs = (!is_pv_32on64_domain(d) ?
+    regs->cs = (!is_pv_32bit_domain(d) ?
                 FLAT_KERNEL_CS : FLAT_COMPAT_KERNEL_CS);
     regs->eip = parms.virt_entry;
     regs->esp = vstack_end;
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -349,7 +349,7 @@ long arch_do_domctl(

     case XEN_DOMCTL_get_address_size:
         domctl->u.address_size.size =
-            is_pv_32on64_domain(d) ? 32 : BITS_PER_LONG;
+            is_pv_32bit_domain(d) ? 32 : BITS_PER_LONG;
         copyback = 1;
         break;
@@ -1183,7 +1183,7 @@ void arch_get_info_guest(struct vcpu *v,
 {
     unsigned int i;
     const struct domain *d = v->domain;
-    bool_t compat = is_pv_32on64_domain(d);
+    bool_t compat = is_pv_32bit_domain(d);
 #define c(fld) (!compat ? (c.nat->fld) : (c.cmp->fld))

     if ( !is_pv_domain(d) )
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -163,9 +163,8 @@ static uint32_t base_disallow_mask;
 #define L1_DISALLOW_MASK ((base_disallow_mask | _PAGE_GNTTAB) & ~_PAGE_GLOBAL)
 #define L2_DISALLOW_MASK (base_disallow_mask & ~_PAGE_PSE)

-#define l3_disallow_mask(d) (!is_pv_32on64_domain(d) ? \
-                             base_disallow_mask : \
-                             0xFFFFF198U)
+#define l3_disallow_mask(d) (!is_pv_32bit_domain(d) ? \
+                             base_disallow_mask : 0xFFFFF198U)

 #define L4_DISALLOW_MASK (base_disallow_mask)
@@ -985,7 +984,7 @@ get_page_from_l4e(
 #define adjust_guest_l1e(pl1e, d)                                            \
     do {                                                                     \
         if ( likely(l1e_get_flags((pl1e)) & _PAGE_PRESENT) &&                \
-             likely(!is_pv_32on64_domain(d)) )                               \
+             likely(!is_pv_32bit_domain(d)) )                                \
         {                                                                    \
             /* _PAGE_GUEST_KERNEL page cannot have the Global bit set. */    \
             if ( (l1e_get_flags((pl1e)) & (_PAGE_GUEST_KERNEL|_PAGE_GLOBAL)) \
@@ -1002,14 +1001,14 @@ get_page_from_l4e(
 #define adjust_guest_l2e(pl2e, d)                             \
     do {                                                      \
         if ( likely(l2e_get_flags((pl2e)) & _PAGE_PRESENT) && \
-             likely(!is_pv_32on64_domain(d)) )                \
+             likely(!is_pv_32bit_domain(d)) )                 \
             l2e_add_flags((pl2e), _PAGE_USER);                \
     } while ( 0 )

 #define adjust_guest_l3e(pl3e, d)                                   \
     do {                                                            \
         if ( likely(l3e_get_flags((pl3e)) & _PAGE_PRESENT) )        \
-            l3e_add_flags((pl3e), likely(!is_pv_32on64_domain(d)) ? \
+            l3e_add_flags((pl3e), likely(!is_pv_32bit_domain(d)) ?  \
                          _PAGE_USER :                              \
                          _PAGE_USER|_PAGE_RW);                     \
     } while ( 0 )
@@ -1017,13 +1016,13 @@ get_page_from_l4e(
 #define adjust_guest_l4e(pl4e, d)                             \
     do {                                                      \
         if ( likely(l4e_get_flags((pl4e)) & _PAGE_PRESENT) && \
-             likely(!is_pv_32on64_domain(d)) )                \
+             likely(!is_pv_32bit_domain(d)) )                 \
             l4e_add_flags((pl4e), _PAGE_USER);                \
     } while ( 0 )

 #define unadjust_guest_l3e(pl3e, d)                                         \
     do {                                                                    \
-        if ( unlikely(is_pv_32on64_domain(d)) &&                            \
+        if ( unlikely(is_pv_32bit_domain(d)) &&                             \
             likely(l3e_get_flags((pl3e)) & _PAGE_PRESENT) )                 \
             l3e_remove_flags((pl3e), _PAGE_USER|_PAGE_RW|_PAGE_ACCESSED);   \
     } while ( 0 )
@@ -1314,7 +1313,7 @@ static int alloc_l3_table(struct page_in
      * 512 entries must be valid/verified, which is most easily achieved
      * by clearing them out.
      */
-    if ( is_pv_32on64_domain(d) )
+    if ( is_pv_32bit_domain(d) )
         memset(pl3e + 4, 0, (L3_PAGETABLE_ENTRIES - 4) * sizeof(*pl3e));

     for ( i = page->nr_validated_ptes; i < L3_PAGETABLE_ENTRIES;
@@ -1391,7 +1390,7 @@ void init_guest_l4_table(l4_pgentry_t l4
         l4e_from_pfn(domain_page_map_to_mfn(l4tab), __PAGE_HYPERVISOR);
     l4tab[l4_table_offset(PERDOMAIN_VIRT_START)] =
         l4e_from_page(d->arch.perdomain_l3_pg, __PAGE_HYPERVISOR);
-    if ( zap_ro_mpt || is_pv_32on64_domain(d) || paging_mode_refcounts(d) )
+    if ( zap_ro_mpt || is_pv_32bit_domain(d) || paging_mode_refcounts(d) )
         l4tab[l4_table_offset(RO_MPT_VIRT_START)] = l4e_empty();
 }
@@ -2707,7 +2706,7 @@ int new_guest_cr3(unsigned long mfn)
     int rc;
     unsigned long old_base_mfn;

-    if ( is_pv_32on64_domain(d) )
+    if ( is_pv_32bit_domain(d) )
     {
         unsigned long gt_mfn = pagetable_get_pfn(curr->arch.guest_table);
         l4_pgentry_t *pl4e = map_domain_page(gt_mfn);
@@ -2856,7 +2855,7 @@ static inline int vcpumask_to_pcpumask(
     unsigned int vcpu_id, vcpu_bias, offs;
     unsigned long vmask;
     struct vcpu *v;
-    bool_t is_native = !is_pv_32on64_domain(d);
+    bool_t is_native = !is_pv_32bit_domain(d);

     cpumask_clear(pmask);
     for ( vmask = 0, offs = 0; ; ++offs)
@@ -5165,7 +5164,7 @@ int ptwr_do_page_fault(struct vcpu *v, u
     ptwr_ctxt.ctxt.regs = regs;
     ptwr_ctxt.ctxt.force_writeback = 0;
     ptwr_ctxt.ctxt.addr_size = ptwr_ctxt.ctxt.sp_size =
-        is_pv_32on64_domain(d) ? 32 : BITS_PER_LONG;
+        is_pv_32bit_domain(d) ? 32 : BITS_PER_LONG;
     ptwr_ctxt.ctxt.swint_emulate = x86_swint_emulate_none;
     ptwr_ctxt.cr2 = addr;
     ptwr_ctxt.pte = pte;
@@ -5235,10 +5234,9 @@ static const struct x86_emulate_ops mmio
 int mmio_ro_do_page_fault(struct vcpu *v, unsigned long addr,
                           struct cpu_user_regs *regs)
 {
-    l1_pgentry_t pte;
-    unsigned long mfn;
-    unsigned int addr_size = is_pv_32on64_domain(v->domain) ?
-                             32 : BITS_PER_LONG;
+    l1_pgentry_t pte;
+    unsigned long mfn;
+    unsigned int addr_size = is_pv_32bit_vcpu(v) ? 32 : BITS_PER_LONG;
     struct mmio_ro_emulate_ctxt mmio_ro_ctxt = {
         .ctxt.regs = regs,
         .ctxt.addr_size = addr_size,
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -2110,7 +2110,7 @@ void sh_destroy_shadow(struct domain *d,
            t == SH_type_fl1_pae_shadow ||
            t == SH_type_fl1_64_shadow  ||
            t == SH_type_monitor_table  ||
-           (is_pv_32on64_domain(d) && t == SH_type_l4_64_shadow) ||
+           (is_pv_32bit_domain(d) && t == SH_type_l4_64_shadow) ||
           (page_get_owner(mfn_to_page(backpointer(sp))) == d));

     /* The down-shifts here are so that the switch statement is on nice
@@ -2139,7 +2139,7 @@ void sh_destroy_shadow(struct domain *d,
         SHADOW_INTERNAL_NAME(sh_destroy_l1_shadow, 4)(d, smfn);
         break;
     case SH_type_l2h_64_shadow:
-        ASSERT(is_pv_32on64_domain(d));
+        ASSERT(is_pv_32bit_domain(d));
         /* Fall through... */
     case SH_type_l2_64_shadow:
         SHADOW_INTERNAL_NAME(sh_destroy_l2_shadow, 4)(d, smfn);
         break;
@@ -3472,7 +3472,7 @@ static int sh_enable_log_dirty(struct do
     /* 32bit PV guests on 64bit xen behave like older 64bit linux: they
      * change an l4e instead of cr3 to switch tables.  Give them the
      * same optimization */
-    if ( is_pv_32on64_domain(d) )
+    if ( is_pv_32bit_domain(d) )
         d->arch.paging.shadow.opt_flags = SHOPT_LINUX_L3_TOPLEVEL;
 #endif
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -131,8 +131,8 @@ set_shadow_status(struct domain *d, mfn_

     ASSERT(mfn_to_page(smfn)->u.sh.head);

-    /* 32-on-64 PV guests don't own their l4 pages so can't get_page them */
-    if ( !is_pv_32on64_domain(d) || shadow_type != SH_type_l4_64_shadow )
+    /* 32-bit PV guests don't own their l4 pages so can't get_page them */
+    if ( !is_pv_32bit_domain(d) || shadow_type != SH_type_l4_64_shadow )
     {
         res = get_page(mfn_to_page(gmfn), d);
         ASSERT(res == 1);
@@ -159,8 +159,8 @@ delete_shadow_status(struct domain *d, m
                    d->domain_id, mfn_x(gmfn), shadow_type, mfn_x(smfn));
     ASSERT(mfn_to_page(smfn)->u.sh.head);
     shadow_hash_delete(d, mfn_x(gmfn), shadow_type, smfn);
-    /* 32-on-64 PV guests don't own their l4 pages; see set_shadow_status */
-    if ( !is_pv_32on64_domain(d) || shadow_type != SH_type_l4_64_shadow )
+    /* 32-bit PV guests don't own their l4 pages; see set_shadow_status */
+    if ( !is_pv_32bit_domain(d) || shadow_type != SH_type_l4_64_shadow )
         put_page(mfn_to_page(gmfn));
 }
@@ -698,7 +698,7 @@ _sh_propagate(struct vcpu *v,
     // PV guests in 64-bit mode use two different page tables for user vs
     // supervisor permissions, making the guest's _PAGE_USER bit irrelevant.
     // It is always shadowed as present...
-    if ( (GUEST_PAGING_LEVELS == 4) && !is_pv_32on64_domain(d)
+    if ( (GUEST_PAGING_LEVELS == 4) && !is_pv_32bit_domain(d)
          && is_pv_domain(d) )
     {
         sflags |= _PAGE_USER;
@@ -1346,8 +1346,8 @@ do {
     for ( _i = 0; _i < SHADOW_L2_PAGETABLE_ENTRIES; _i++ )                \
     {                                                                     \
         if ( (!(_xen))                                                    \
-             || !is_pv_32on64_domain(_dom)                                \
-             || mfn_to_page(_sl2mfn)->u.sh.type != SH_type_l2h_64_shadow\
+             || !is_pv_32bit_domain(_dom)                                 \
+             || mfn_to_page(_sl2mfn)->u.sh.type != SH_type_l2h_64_shadow  \
              || (_i < COMPAT_L2_PAGETABLE_FIRST_XEN_SLOT(_dom)) )         \
         {                                                                 \
             (_sl2e) = _sp + _i;                                           \
@@ -1435,7 +1435,7 @@ void sh_install_xen_entries_in_l4(struct
         shadow_l4e_from_mfn(page_to_mfn(d->arch.perdomain_l3_pg),
                             __PAGE_HYPERVISOR);

-    if ( !shadow_mode_external(d) && !is_pv_32on64_domain(d) &&
+    if ( !shadow_mode_external(d) && !is_pv_32bit_domain(d) &&
          !VM_ASSIST(d, m2p_strict) )
     {
         /* open coded zap_ro_mpt(mfn_x(sl4mfn)): */
@@ -1475,7 +1475,7 @@ static void sh_install_xen_entries_in_l2
 {
     shadow_l2e_t *sl2e;

-    if ( !is_pv_32on64_domain(d) )
+    if ( !is_pv_32bit_domain(d) )
         return;

     sl2e = sh_map_domain_page(sl2hmfn);
@@ -1620,9 +1620,9 @@ sh_make_monitor_table(struct vcpu *v)
             l3e[0] = l3e_from_pfn(mfn_x(m2mfn), __PAGE_HYPERVISOR);
             sh_unmap_domain_page(l3e);

-            if ( is_pv_32on64_domain(d) )
+            if ( is_pv_32bit_domain(d) )
             {
-                /* For 32-on-64 PV guests, we need to map the 32-bit Xen
+                /* For 32-bit PV guests, we need to map the 32-bit Xen
                  * area into its usual VAs in the monitor tables */
                 m3mfn = shadow_alloc(d, SH_type_monitor_table, 0);
                 mfn_to_page(m3mfn)->shadow_flags = 3;
@@ -1740,7 +1740,7 @@ static shadow_l2e_t * shadow_get_and_cre
             unsigned int t = SH_type_l2_shadow;

             /* Tag compat L2 containing hypervisor (m2p) mappings */
-            if ( is_pv_32on64_domain(v->domain) &&
+            if ( is_pv_32bit_vcpu(v) &&
                  guest_l4_table_offset(gw->va) == 0 &&
                  guest_l3_table_offset(gw->va) == 3 )
                 t = SH_type_l2h_shadow;
@@ -2043,7 +2043,7 @@ void sh_destroy_monitor_table(struct vcp
         sh_unmap_domain_page(l3e);
         shadow_free(d, m3mfn);

-        if ( is_pv_32on64_domain(d) )
+        if ( is_pv_32bit_domain(d) )
         {
             /* Need to destroy the l3 and l2 monitor pages that map the
              * Xen VAs at 3GB-4GB */
@@ -3963,7 +3963,7 @@ sh_update_cr3(struct vcpu *v, int do_loc
                   (unsigned long)pagetable_get_pfn(v->arch.guest_table));

 #if GUEST_PAGING_LEVELS == 4
-    if ( !(v->arch.flags & TF_kernel_mode) && !is_pv_32on64_domain(d) )
+    if ( !(v->arch.flags & TF_kernel_mode) && !is_pv_32bit_domain(d) )
         gmfn = pagetable_get_mfn(v->arch.guest_table_user);
     else
 #endif
@@ -4078,7 +4078,7 @@ sh_update_cr3(struct vcpu *v, int do_loc
         if ( sh_remove_write_access(d, gmfn, 4, 0) != 0 )
             flush_tlb_mask(d->domain_dirty_cpumask);
         sh_set_toplevel_shadow(v, 0, gmfn, SH_type_l4_shadow);
-        if ( !shadow_mode_external(d) && !is_pv_32on64_domain(d) )
+        if ( !shadow_mode_external(d) && !is_pv_32bit_domain(d) )
         {
             mfn_t smfn = pagetable_get_mfn(v->arch.shadow_table[0]);

@@ -5104,7 +5104,7 @@ int sh_audit_l3_table(struct vcpu *v, mf
             gmfn = get_shadow_status(d, get_gfn_query_unlocked(
                                         d, gfn_x(gfn), &p2mt),
                                      ((GUEST_PAGING_LEVELS == 3 ||
-                                       is_pv_32on64_domain(d))
+                                       is_pv_32bit_domain(d))
                                       && !shadow_mode_external(d)
                                       && (guest_index(gl3e) % 4) == 3)
                                      ? SH_type_l2h_shadow
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -951,7 +951,7 @@ void pv_cpuid(struct cpu_user_regs *regs
             __clear_bit(X86_FEATURE_LM % 32, &d);
             __clear_bit(X86_FEATURE_LAHF_LM % 32, &c);
         }
-        if ( is_pv_32on64_domain(currd) &&
+        if ( is_pv_32bit_domain(currd) &&
             boot_cpu_data.x86_vendor != X86_VENDOR_AMD )
             __clear_bit(X86_FEATURE_SYSCALL % 32, &d);
         __clear_bit(X86_FEATURE_PAGE1GB % 32, &d);
@@ -3675,7 +3675,7 @@ long register_guest_nmi_callback(unsigne

     t->vector  = TRAP_nmi;
     t->flags   = 0;
-    t->cs      = (is_pv_32on64_domain(d) ?
+    t->cs      = (is_pv_32bit_domain(d) ?
                   FLAT_COMPAT_KERNEL_CS : FLAT_KERNEL_CS);
     t->address = address;
     TI_SET_IF(t, 1);
--- a/xen/arch/x86/x86_64/mm.c
+++ b/xen/arch/x86/x86_64/mm.c
@@ -1188,7 +1188,7 @@ int handle_memadd_fault(unsigned long ad
     unsigned long mfn, idle_index;
     int ret = 0;

-    if (!is_pv_32on64_domain(d))
+    if (!is_pv_32bit_domain(d))
         return 0;

     if ( (addr < HYPERVISOR_COMPAT_VIRT_START(d)) ||
@@ -1247,7 +1247,7 @@ unmap:

 void domain_set_alloc_bitsize(struct domain *d)
 {
-    if ( !is_pv_32on64_domain(d) ||
+    if ( !is_pv_32bit_domain(d) ||
          (MACH2PHYS_COMPAT_NR_ENTRIES(d) >= max_page) ||
          d->arch.physaddr_bitsize > 0 )
         return;
--- a/xen/common/domctl.c
+++ b/xen/common/domctl.c
@@ -495,7 +495,7 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xe
             break;

 #ifdef CONFIG_COMPAT
-        if ( !is_pv_32on64_domain(d) )
+        if ( !is_pv_32bit_domain(d) )
             ret = copy_from_guest(c.nat, op->u.vcpucontext.ctxt, 1);
         else
             ret = copy_from_guest(c.cmp,
@@ -901,7 +901,7 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xe
         vcpu_unpause(v);

 #ifdef CONFIG_COMPAT
-        if ( !is_pv_32on64_domain(d) )
+        if ( !is_pv_32bit_domain(d) )
             ret = copy_to_guest(op->u.vcpucontext.ctxt, c.nat, 1);
         else
             ret = copy_to_guest(guest_handle_cast(op->u.vcpucontext.ctxt,
--- a/xen/common/kexec.c
+++ b/xen/common/kexec.c
@@ -872,7 +872,7 @@ static int kexec_load_slot(struct kexec_
 static uint16_t kexec_load_v1_arch(void)
 {
 #ifdef CONFIG_X86
-    return is_pv_32on64_domain(hardware_domain) ? EM_386 : EM_X86_64;
+    return is_pv_32bit_domain(hardware_domain) ? EM_386 : EM_X86_64;
 #else
     return EM_NONE;
 #endif
--- a/xen/common/xenoprof.c
+++ b/xen/common/xenoprof.c
@@ -219,7 +219,7 @@ static int alloc_xenoprof_struct(
     bufsize = sizeof(struct xenoprof_buf);
     i = sizeof(struct event_log);
 #ifdef CONFIG_COMPAT
-    d->xenoprof->is_compat = is_pv_32on64_domain(is_passive ? hardware_domain : d);
+    d->xenoprof->is_compat = is_pv_32bit_domain(is_passive ? hardware_domain : d);
     if ( XENOPROF_COMPAT(d->xenoprof) )
     {
         bufsize = sizeof(struct compat_oprof_buf);
--- a/xen/include/asm-x86/desc.h
+++ b/xen/include/asm-x86/desc.h
@@ -65,7 +65,7 @@
  */
 #define guest_gate_selector_okay(d, sel)                                \
     ((((sel)>>3) < FIRST_RESERVED_GDT_ENTRY) || /* Guest seg? */        \
-     ((sel) == (!is_pv_32on64_domain(d) ?                               \
+     ((sel) == (!is_pv_32bit_domain(d) ?                                \
                 FLAT_KERNEL_CS :                /* Xen default seg? */  \
                 FLAT_COMPAT_KERNEL_CS)) ||                              \
      ((sel) & 4))                               /* LDT seg? */
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -14,7 +14,6 @@
 #define has_32bit_shinfo(d)    ((d)->arch.has_32bit_shinfo)
 #define is_pv_32bit_domain(d)  ((d)->arch.is_32bit_pv)
 #define is_pv_32bit_vcpu(v)    (is_pv_32bit_domain((v)->domain))
-#define is_pv_32on64_domain(d) (is_pv_32bit_domain(d))

 #define is_hvm_pv_evtchn_domain(d) (has_hvm_container_domain(d) && \
         d->arch.hvm_domain.irq.callback_via_type == HVMIRQ_callback_vector)
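[For readers following along, the asm-x86/domain.h hunk just above carries the whole justification: after the x86-32 removal the dropped predicate was a plain alias, so every substitution in this patch is a mechanical rename. A minimal sketch of the three predicates involved, as quoted from that header:

    #define is_pv_32bit_domain(d)  ((d)->arch.is_32bit_pv)            /* kept */
    #define is_pv_32bit_vcpu(v)    (is_pv_32bit_domain((v)->domain))  /* kept */
    #define is_pv_32on64_domain(d) (is_pv_32bit_domain(d))            /* dropped here */

The second definition is also why call sites that tested is_pv_32bit_domain(v->domain), e.g. in pv_guest_cr4_fixup() and load_segments(), can now use is_pv_32bit_vcpu(v) directly; these are the "no longer open-coding" cases the commit message mentions.]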
Attachment: x86-drop-is_32on64_domain.patch

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel