[Xen-devel] [PATCH 2/3] slightly reduce vm_assist code
- drop an effectively unused struct pv_vcpu field (x86)
- adjust VM_ASSIST() to prepend VMASST_TYPE_ to its type argument (see the illustrative sketch below)
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
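
For illustration only (not part of the patch): a minimal, self-contained
sketch of the reworked macro. test_bit() is simplified here, the struct is a
stand-in for the real struct domain, and the VMASST_TYPE_* values follow
xen/include/public/xen.h.

#include <stdio.h>

/* Assist numbers as in the public header. */
#define VMASST_TYPE_writable_pagetables 2
#define VMASST_TYPE_pae_extended_cr3    3

struct domain { unsigned long vm_assist; };

/* Simplified stand-in for Xen's test_bit(). */
static int test_bit(int nr, const unsigned long *addr)
{
    return (*addr >> nr) & 1;
}

/* The new macro: token-paste the VMASST_TYPE_ prefix onto the name. */
#define VM_ASSIST(d, t) (test_bit(VMASST_TYPE_ ## t, &(d)->vm_assist))

int main(void)
{
    struct domain dom = { .vm_assist = 1UL << VMASST_TYPE_pae_extended_cr3 };

    /* Call sites now name the assist without the prefix. */
    printf("%d\n", VM_ASSIST(&dom, pae_extended_cr3));    /* prints 1 */
    printf("%d\n", VM_ASSIST(&dom, writable_pagetables)); /* prints 0 */
    return 0;
}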
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -901,7 +901,6 @@ int arch_set_info_guest(
v->arch.pv_vcpu.event_callback_cs = c(event_callback_cs);
v->arch.pv_vcpu.failsafe_callback_cs = c(failsafe_callback_cs);
}
- v->arch.pv_vcpu.vm_assist = c(vm_assist);
/* Only CR0.TS is modifiable by guest or admin. */
v->arch.pv_vcpu.ctrlreg[0] &= X86_CR0_TS;
@@ -973,7 +972,7 @@ int arch_set_info_guest(
case -ERESTART:
break;
case 0:
- if ( !compat && !VM_ASSIST(d, VMASST_TYPE_m2p_strict) &&
+ if ( !compat && !VM_ASSIST(d, m2p_strict) &&
!paging_mode_refcounts(d) )
{
l4_pgentry_t *l4tab = __map_domain_page(cr3_page);
@@ -1023,7 +1022,7 @@ int arch_set_info_guest(
cr3_page = NULL;
break;
case 0:
- if ( VM_ASSIST(d, VMASST_TYPE_m2p_strict) )
+ if ( VM_ASSIST(d, m2p_strict) )
{
l4_pgentry_t *l4tab = __map_domain_page(cr3_page);
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -1436,7 +1436,6 @@ void arch_get_info_guest(struct vcpu *v,
c(event_callback_cs = v->arch.pv_vcpu.event_callback_cs);
c(failsafe_callback_cs = v->arch.pv_vcpu.failsafe_callback_cs);
}
- c(vm_assist = v->arch.pv_vcpu.vm_assist);
/* IOPL privileges are virtualised: merge back into returned eflags. */
BUG_ON((c(user_regs.eflags) & X86_EFLAGS_IOPL) != 0);
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -1454,7 +1454,7 @@ static int alloc_l4_table(struct page_in
adjust_guest_l4e(pl4e[i], d);
}
- init_guest_l4_table(pl4e, d, !VM_ASSIST(d, VMASST_TYPE_m2p_strict));
+ init_guest_l4_table(pl4e, d, !VM_ASSIST(d, m2p_strict));
unmap_domain_page(pl4e);
return rc > 0 ? 0 : rc;
@@ -2765,7 +2765,7 @@ int new_guest_cr3(unsigned long mfn)
invalidate_shadow_ldt(curr, 0);
- if ( !VM_ASSIST(d, VMASST_TYPE_m2p_strict) && !paging_mode_refcounts(d) )
+ if ( !VM_ASSIST(d, m2p_strict) && !paging_mode_refcounts(d) )
{
l4_pgentry_t *l4tab = map_domain_page(mfn);
@@ -3135,8 +3135,7 @@ long do_mmuext_op(
op.arg1.mfn);
break;
}
- if ( VM_ASSIST(d, VMASST_TYPE_m2p_strict) &&
- !paging_mode_refcounts(d) )
+ if ( VM_ASSIST(d, m2p_strict) && !paging_mode_refcounts(d) )
{
l4_pgentry_t *l4tab = map_domain_page(op.arg1.mfn);
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -1436,7 +1436,7 @@ void sh_install_xen_entries_in_l4(struct
shadow_l4e_from_mfn(page_to_mfn(d->arch.perdomain_l3_pg),
__PAGE_HYPERVISOR);
- if ( !VM_ASSIST(d, VMASST_TYPE_m2p_strict) )
+ if ( !VM_ASSIST(d, m2p_strict) )
sl4e[shadow_l4_table_offset(RO_MPT_VIRT_START)] = shadow_l4e_empty();
/* Shadow linear mapping for 4-level shadows. N.B. for 3-level
@@ -3983,11 +3983,11 @@ sh_update_cr3(struct vcpu *v, int do_loc
shadow_l4e_t *sl4e = v->arch.paging.shadow.guest_vtable;
if ( (v->arch.flags & TF_kernel_mode) &&
- !VM_ASSIST(d, VMASST_TYPE_m2p_strict) )
+ !VM_ASSIST(d, m2p_strict) )
sl4e[shadow_l4_table_offset(RO_MPT_VIRT_START)] =
idle_pg_table[l4_table_offset(RO_MPT_VIRT_START)];
else if ( !(v->arch.flags & TF_kernel_mode) &&
- VM_ASSIST(d, VMASST_TYPE_m2p_strict) )
+ VM_ASSIST(d, m2p_strict) )
sl4e[shadow_l4_table_offset(RO_MPT_VIRT_START)] =
shadow_l4e_empty();
}
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -1441,7 +1441,7 @@ static int fixup_page_fault(unsigned lon
!(regs->error_code & (PFEC_reserved_bit | PFEC_insn_fetch)) &&
(regs->error_code & PFEC_write_access) )
{
- if ( VM_ASSIST(d, VMASST_TYPE_writable_pagetables) &&
+ if ( VM_ASSIST(d, writable_pagetables) &&
/* Do not check if access-protection fault since the page may
legitimately be not present in shadow page tables */
(paging_mode_enabled(d) ||
--- a/xen/common/kernel.c
+++ b/xen/common/kernel.c
@@ -306,7 +306,7 @@ DO(xen_version)(int cmd, XEN_GUEST_HANDL
{
case 0:
fi.submap = (1U << XENFEAT_memory_op_vnode_supported);
- if ( VM_ASSIST(d, VMASST_TYPE_pae_extended_cr3) )
+ if ( VM_ASSIST(d, pae_extended_cr3) )
fi.submap |= (1U << XENFEAT_pae_pgdir_above_4gb);
if ( paging_mode_translate(d) )
fi.submap |=
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -372,8 +372,6 @@ struct pv_vcpu
};
};
- unsigned long vm_assist;
-
unsigned long syscall32_callback_eip;
unsigned long sysenter_callback_eip;
unsigned short syscall32_callback_cs;
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -833,7 +833,7 @@ void watchdog_domain_destroy(struct doma
/* This check is for functionality specific to a control domain */
#define is_control_domain(_d) ((_d)->is_privileged)
-#define VM_ASSIST(_d,_t) (test_bit((_t), &(_d)->vm_assist))
+#define VM_ASSIST(d, t) (test_bit(VMASST_TYPE_ ## t, &(d)->vm_assist))
#define is_pv_domain(d) ((d)->guest_type == guest_type_pv)
#define is_pv_vcpu(v) (is_pv_domain((v)->domain))
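
A note on the final hunk, under the same assumptions as the sketch above: the
old macro accepted any integer expression as its second argument, while the
token pasting requires a bare assist name, so misspellings are now caught at
build time.

/* Hypothetical call sites (m2p_strikt is a deliberate typo):
 *   old: VM_ASSIST(d, 42)          -- compiled, silently tested bit 42
 *   new: VM_ASSIST(d, m2p_strikt)  -- error: VMASST_TYPE_m2p_strikt undeclared
 */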
Attachment: vm-assist-cleanup.patch