[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] Re: [Xen-devel] [PATCH RFC V6 3/5] xen, libxc: Force-enable relevant MSR events
>>> On 11.08.14 at 17:08, <rcojocaru@xxxxxxxxxxxxxxx> wrote:
> --- a/xen/arch/x86/hvm/vmx/vmcs.c
> +++ b/xen/arch/x86/hvm/vmx/vmcs.c
> @@ -39,6 +39,7 @@
>  #include <xen/keyhandler.h>
>  #include <asm/shadow.h>
>  #include <asm/tboot.h>
> +#include <asm/mem_event.h>
>
>  static bool_t __read_mostly opt_vpid_enabled = 1;
>  boolean_param("vpid", opt_vpid_enabled);
> @@ -695,11 +696,31 @@ static void vmx_set_host_env(struct vcpu *v)
>  void vmx_disable_intercept_for_msr(struct vcpu *v, u32 msr, int type)
>  {
>      unsigned long *msr_bitmap = v->arch.hvm_vmx.msr_bitmap;
> +    struct domain *d = v->domain;
>
>      /* VMX MSR bitmap supported? */
>      if ( msr_bitmap == NULL )
>          return;
>
> +    if ( mem_event_check_ring(&d->mem_event->access) &&
> +         d->arch.hvm_domain.introspection_enabled )
> +    {
> +        /* Filter out MSR-s needed for memory introspection */
> +        switch ( msr )
> +        {
> +        case MSR_IA32_SYSENTER_EIP:
> +        case MSR_IA32_SYSENTER_ESP:
> +        case MSR_IA32_SYSENTER_CS:
> +        case MSR_IA32_MC0_CTL:
> +        case MSR_STAR:
> +        case MSR_LSTAR:
> +            return;

So you're adding an array further down, but just to use it there? My
main point in asking for something like that was to have a _single_
place where all the relevant MSRs get enumerated.

> +
> +        default:
> +            break;

This is pointless.

> --- a/xen/arch/x86/hvm/vmx/vmx.c
> +++ b/xen/arch/x86/hvm/vmx/vmx.c
> @@ -1682,6 +1682,23 @@ void vmx_hypervisor_cpuid_leaf(uint32_t sub_idx,
>      *eax |= XEN_HVM_CPUID_X2APIC_VIRT;
>  }
>
> +static void vmx_enable_msr_exit_interception(struct domain *d)
> +{
> +    struct vcpu *v;
> +    const u32 msrs[] = { MSR_IA32_SYSENTER_EIP,
> +                         MSR_IA32_SYSENTER_ESP,
> +                         MSR_IA32_SYSENTER_CS,
> +                         MSR_IA32_MC0_CTL,
> +                         MSR_STAR,
> +                         MSR_LSTAR };

static

> +    int i;

unsigned

Jan

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel
Lists.xenproject.org is hosted with RackSpace, monitoring our servers 24x7x365 and backed by RackSpace's Fanatical Support®.