[Xen-devel] [PATCH v2 4/4] vm-assist: slightly reduce source code size
Adjust VM_ASSIST() to prepend VMASST_TYPE_.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
---
v2: Split original patch into two.

--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -972,7 +972,7 @@ int arch_set_info_guest(
         case -ERESTART:
             break;
         case 0:
-            if ( !compat && !VM_ASSIST(d, VMASST_TYPE_m2p_strict) &&
+            if ( !compat && !VM_ASSIST(d, m2p_strict) &&
                  !paging_mode_refcounts(d) )
                 fill_ro_mpt(cr3_gfn);
             break;
@@ -1011,7 +1011,7 @@ int arch_set_info_guest(
                 cr3_page = NULL;
                 break;
             case 0:
-                if ( VM_ASSIST(d, VMASST_TYPE_m2p_strict) )
+                if ( VM_ASSIST(d, m2p_strict) )
                     zap_ro_mpt(cr3_gfn);
                 break;
             }
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -1464,7 +1464,7 @@ static int alloc_l4_table(struct page_in
             adjust_guest_l4e(pl4e[i], d);
     }
 
-    init_guest_l4_table(pl4e, d, !VM_ASSIST(d, VMASST_TYPE_m2p_strict));
+    init_guest_l4_table(pl4e, d, !VM_ASSIST(d, m2p_strict));
     unmap_domain_page(pl4e);
 
     return rc > 0 ? 0 : rc;
@@ -2774,7 +2774,7 @@ int new_guest_cr3(unsigned long mfn)
     invalidate_shadow_ldt(curr, 0);
 
-    if ( !VM_ASSIST(d, VMASST_TYPE_m2p_strict) && !paging_mode_refcounts(d) )
+    if ( !VM_ASSIST(d, m2p_strict) && !paging_mode_refcounts(d) )
         fill_ro_mpt(mfn);
 
     curr->arch.guest_table = pagetable_from_pfn(mfn);
     update_cr3(curr);
@@ -3133,8 +3133,7 @@ long do_mmuext_op(
                                  op.arg1.mfn);
                 break;
             }
-            if ( VM_ASSIST(d, VMASST_TYPE_m2p_strict) &&
-                 !paging_mode_refcounts(d) )
+            if ( VM_ASSIST(d, m2p_strict) && !paging_mode_refcounts(d) )
                 zap_ro_mpt(op.arg1.mfn);
         }
 
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -1435,7 +1435,7 @@ void sh_install_xen_entries_in_l4(struct
         shadow_l4e_from_mfn(page_to_mfn(d->arch.perdomain_l3_pg),
                             __PAGE_HYPERVISOR);
 
-    if ( !VM_ASSIST(d, VMASST_TYPE_m2p_strict) )
+    if ( !VM_ASSIST(d, m2p_strict) )
         sl4e[shadow_l4_table_offset(RO_MPT_VIRT_START)] = shadow_l4e_empty();
 
     /* Shadow linear mapping for 4-level shadows.  N.B. for 3-level
@@ -3983,11 +3983,11 @@ sh_update_cr3(struct vcpu *v, int do_loc
             shadow_l4e_t *sl4e = v->arch.paging.shadow.guest_vtable;
 
             if ( (v->arch.flags & TF_kernel_mode) &&
-                 !VM_ASSIST(d, VMASST_TYPE_m2p_strict) )
+                 !VM_ASSIST(d, m2p_strict) )
                 sl4e[shadow_l4_table_offset(RO_MPT_VIRT_START)] =
                     idle_pg_table[l4_table_offset(RO_MPT_VIRT_START)];
             else if ( !(v->arch.flags & TF_kernel_mode) &&
-                      VM_ASSIST(d, VMASST_TYPE_m2p_strict) )
+                      VM_ASSIST(d, m2p_strict) )
                 sl4e[shadow_l4_table_offset(RO_MPT_VIRT_START)] =
                     shadow_l4e_empty();
         }
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -1441,7 +1441,7 @@ static int fixup_page_fault(unsigned lon
          !(regs->error_code & (PFEC_reserved_bit | PFEC_insn_fetch)) &&
          (regs->error_code & PFEC_write_access) )
     {
-        if ( VM_ASSIST(d, VMASST_TYPE_writable_pagetables) &&
+        if ( VM_ASSIST(d, writable_pagetables) &&
             /* Do not check if access-protection fault since the page may
                legitimately be not present in shadow page tables */
             (paging_mode_enabled(d) ||
--- a/xen/common/kernel.c
+++ b/xen/common/kernel.c
@@ -306,7 +306,7 @@ DO(xen_version)(int cmd, XEN_GUEST_HANDL
         {
         case 0:
             fi.submap = (1U << XENFEAT_memory_op_vnode_supported);
-            if ( VM_ASSIST(d, VMASST_TYPE_pae_extended_cr3) )
+            if ( VM_ASSIST(d, pae_extended_cr3) )
                 fi.submap |= (1U << XENFEAT_pae_pgdir_above_4gb);
             if ( paging_mode_translate(d) )
                 fi.submap |=
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -833,7 +833,7 @@ void watchdog_domain_destroy(struct doma
 /* This check is for functionality specific to a control domain */
 #define is_control_domain(_d) ((_d)->is_privileged)
 
-#define VM_ASSIST(_d,_t) (test_bit((_t), &(_d)->vm_assist))
+#define VM_ASSIST(d, t) (test_bit(VMASST_TYPE_ ## t, &(d)->vm_assist))
 
 #define is_pv_domain(d) ((d)->guest_type == guest_type_pv)
 #define is_pv_vcpu(v)   (is_pv_domain((v)->domain))
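[Editor's note: the new macro relies on the preprocessor's ## token-pasting
operator, so callers name only the suffix and VMASST_TYPE_ is glued on at
expansion time. Below is a minimal standalone sketch of that mechanism, not
Xen code: the constant values are made up for the example, and a plain
shift-and-mask stands in for Xen's test_bit().]

#include <stdio.h>

/* Illustrative stand-ins for the VMASST_TYPE_* constants in Xen's
 * public headers; the values here are invented for the example. */
#define VMASST_TYPE_writable_pagetables 2
#define VMASST_TYPE_m2p_strict          5

struct domain { unsigned long vm_assist; };

/* As in the patch: VMASST_TYPE_ ## t pastes the prefix onto the
 * caller's token, so VM_ASSIST(d, m2p_strict) tests the
 * VMASST_TYPE_m2p_strict bit of d->vm_assist.  A plain bit test
 * replaces Xen's test_bit() here. */
#define VM_ASSIST(d, t) (!!((d)->vm_assist & (1UL << VMASST_TYPE_ ## t)))

int main(void)
{
    struct domain d = { .vm_assist = 1UL << VMASST_TYPE_m2p_strict };

    printf("writable_pagetables: %d\n", VM_ASSIST(&d, writable_pagetables));
    printf("m2p_strict:          %d\n", VM_ASSIST(&d, m2p_strict));
    return 0;
}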
Attachment: vm-assist-simplify.patch