RE: [Xen-devel] [PATCH] X86: cpuid faulting feature enable



Jan Beulich wrote:
>>>> On 01.07.11 at 16:32, "Liu, Jinsong" <jinsong.liu@xxxxxxxxx> wrote:
>> X86: cpuid faulting feature enable
>> 
>> The latest Intel processors add a cpuid faulting feature. This patch
>> supports cpuid faulting in Xen.
>> Like cpuid spoofing, cpuid faulting is mainly used to support live
>> migration. When cpl > 0, the cpuid instruction produces a #GP fault,
>> and the vmm then emulates execution of the cpuid instruction. Hence
>> guest software will see the values chosen by the vmm.
>> 
>> Signed-off-by: Liu, Jinsong <jinsong.liu@xxxxxxxxx>
>> 
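
For reference, the mechanism described above boils down to two MSRs, which
the patch below uses: MSR_INTEL_PLATFORM_INFO (0xce) bit 31 enumerates the
feature, and MSR_INTEL_MISC_FEATURES_ENABLES (0x140) bit 0 switches it on.
A rough standalone sketch (illustrative only; the raw 64-bit accessors here
stand in for Xen's actual lo/hi-style rdmsr/wrmsr helpers):

    #include <stdint.h>

    /* Sketch only: privileged MSR accessors, not runnable from user space. */
    static inline uint64_t rdmsr64(uint32_t msr)
    {
        uint32_t lo, hi;
        asm volatile("rdmsr" : "=a" (lo), "=d" (hi) : "c" (msr));
        return ((uint64_t)hi << 32) | lo;
    }

    static inline void wrmsr64(uint32_t msr, uint64_t val)
    {
        asm volatile("wrmsr" :: "c" (msr),
                     "a" ((uint32_t)val), "d" ((uint32_t)(val >> 32)));
    }

    #define MSR_INTEL_PLATFORM_INFO         0x000000ce /* bit 31: available */
    #define MSR_INTEL_MISC_FEATURES_ENABLES 0x00000140 /* bit 0: enable */

    /* Returns 1 if cpuid faulting was advertised and switched on.  Once
     * enabled, CPUID at cpl > 0 raises #GP(0), and the vmm's fault handler
     * emulates the instruction with values of its choosing. */
    static int enable_cpuid_faulting(void)
    {
        if (!(rdmsr64(MSR_INTEL_PLATFORM_INFO) & (1ull << 31)))
            return 0;
        wrmsr64(MSR_INTEL_MISC_FEATURES_ENABLES,
                rdmsr64(MSR_INTEL_MISC_FEATURES_ENABLES) | 1);
        return 1;
    }
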
>> diff -r 593d51c5f4ee xen/arch/x86/cpu/common.c
>> --- a/xen/arch/x86/cpu/common.c      Sun Jun 12 22:27:01 2011 +0800
>> +++ b/xen/arch/x86/cpu/common.c      Fri Jul 01 19:04:41 2011 +0800
>> @@ -603,6 +603,18 @@ void __init early_cpu_init(void)
>>  #endif
>>      early_cpu_detect();
>>  }
>> +
>> +static int __init cpuid_faulting_init(void)
>> +{
>> +    if ( (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) &&
>> +            cpu_has_cpuid_faulting ) {
>> +            cpuid_faulting_flip = intel_cpuid_faulting_flip;
>> +    }
>> +
>> +    return 0;
>> +}
>> +__initcall(cpuid_faulting_init);
>> +
>>  /*
>>   * cpu_init() initializes state that is per-CPU. Some data is already
>>   * initialized (naturally) in the bootstrap process, such as the GDT
>> diff -r 593d51c5f4ee xen/arch/x86/cpu/cpu.h
>> --- a/xen/arch/x86/cpu/cpu.h Sun Jun 12 22:27:01 2011 +0800
>> +++ b/xen/arch/x86/cpu/cpu.h Fri Jul 01 19:04:41 2011 +0800
>> @@ -30,4 +30,4 @@ extern void generic_identify(struct cpui
>>  extern void generic_identify(struct cpuinfo_x86 * c);
>> 
>>  extern void early_intel_workaround(struct cpuinfo_x86 *c);
>> -
>> +extern void intel_cpuid_faulting_flip(unsigned int enable);
>> diff -r 593d51c5f4ee xen/arch/x86/cpu/intel.c
>> --- a/xen/arch/x86/cpu/intel.c       Sun Jun 12 22:27:01 2011 +0800
>> +++ b/xen/arch/x86/cpu/intel.c       Fri Jul 01 19:04:41 2011 +0800
>> @@ -24,6 +24,39 @@
>>   */
>>  struct movsl_mask movsl_mask __read_mostly;
>>  #endif
>> +
>> +static unsigned int intel_cpuid_faulting_enumerate(void)
>> +{
>> +    uint32_t hi, lo;
>> +    struct cpuinfo_x86 *c = &cpu_data[smp_processor_id()];
>> +
>> +    /*
>> +    * Currently only one type of intel processor support cpuid faulting.
>> +    * FIXME when needed in the future.
>> +    */
>> +    if (!((c->x86 == 6) && (c->x86_model == 58) && (c->x86_mask == 2)))
> 
> Down to a particular stepping? That surely doesn't make sense for
> anything but your own experimenting.

Yes, it's somewhat ugly.
Currently cpuid faulting is not an architecturally committed feature, and some
other Intel processors (which do not have the cpuid faulting feature) also have
the 0xce MSR.
Hence I use the current check to be safe. However, I marked it with a FIXME so
it can be updated accordingly in the future.
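
Once the bit becomes architectural, the stepping check could give way to
probing the MSR directly. A minimal sketch, assuming a hypothetical
rdmsr_safe_u64() helper that returns nonzero when the MSR read faults
(Xen's real fault-tolerant accessors differ in shape):

    static unsigned int intel_cpuid_faulting_enumerate(void)
    {
        uint64_t platform_info;

        if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
            boot_cpu_data.x86 != 6)
            return 0;

        /* Tolerate models that lack the 0xce MSR altogether. */
        if (rdmsr_safe_u64(MSR_INTEL_PLATFORM_INFO, &platform_info))
            return 0;

        /* Bit 31 advertises cpuid faulting (once architecturally defined). */
        return !!(platform_info & (1ull << 31));
    }
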

> 
>> +            return 0;
>> +
>> +    rdmsr(MSR_INTEL_PLATFORM_INFO, lo, hi);
>> +    if (lo & (1 << 31))
>> +            return 1;
>> +
>> +    return 0;
>> +}
>> +
>> +void intel_cpuid_faulting_flip(unsigned int enable)
>> +{
>> +    uint32_t hi, lo;
>> +
>> +    rdmsr(MSR_INTEL_MISC_FEATURES_ENABLES, lo, hi);
>> +    if (enable)
>> +            lo |= 1;
>> +    else
>> +            lo &= ~1;
>> +    wrmsr(MSR_INTEL_MISC_FEATURES_ENABLES, lo, hi);
>> +
>> +    per_cpu(cpuid_faulting_enabled, smp_processor_id()) = enable;
>> +}
>> 
>>  /*
>>   * opt_cpuid_mask_ecx/edx: cpuid.1[ecx, edx] feature mask.
>> @@ -194,7 +227,10 @@ static void __devinit init_intel(struct 
>>              detect_ht(c);
>>      }
>> 
>> -    set_cpuidmask(c);
>> +    if (intel_cpuid_faulting_enumerate())
>> +            set_bit(X86_FEATURE_CPUID_FAULTING, c->x86_capability);
>> +    else
>> +            set_cpuidmask(c);
>> 
>>      /* Work around errata */
>>      Intel_errata_workarounds(c);
>> diff -r 593d51c5f4ee xen/arch/x86/domain.c
>> --- a/xen/arch/x86/domain.c  Sun Jun 12 22:27:01 2011 +0800
>> +++ b/xen/arch/x86/domain.c  Fri Jul 01 19:04:41 2011 +0800
>> @@ -63,6 +63,9 @@
>> 
>>  DEFINE_PER_CPU(struct vcpu *, curr_vcpu);
>>  DEFINE_PER_CPU(unsigned long, cr4);
>> +DEFINE_PER_CPU(unsigned int, cpuid_faulting_enabled);
>> +
>> +void (*cpuid_faulting_flip)(unsigned int enable);
> 
> bool_t for both (and elsewhere in the patch)?

OK, will change it to bool_t.
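
That is, roughly:

    DEFINE_PER_CPU(bool_t, cpuid_faulting_enabled);

    void (*cpuid_faulting_flip)(bool_t enable);

with intel_cpuid_faulting_flip() adjusted to take bool_t as well.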


> 
>> 
>>  static void default_idle(void);
>>  static void default_dead_idle(void);
>> @@ -1680,6 +1683,15 @@ void context_switch(struct vcpu *prev, s
>>              load_LDT(next);
>>              load_segments(next);
>>          }
>> +
>> +    if ( cpuid_faulting_flip )
>> +    {
>> +            unsigned int enable;
>> +
>> +            enable = !is_hvm_vcpu(next) && (next->domain->domain_id != 0);
> 
> Excluding Dom0 here is perhaps questionable (as it could allow hiding
> features not supported by Xen from applications).
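
(For reference, dropping the Dom0 exception as suggested would reduce the
assignment above to plain "enable = !is_hvm_vcpu(next);".)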
> 
>> +            if ( enable ^ this_cpu(cpuid_faulting_enabled) )
>> +                    cpuid_faulting_flip(enable);
>> +    }
>>      }
>> 
>>      context_saved(prev);
>> diff -r 593d51c5f4ee xen/arch/x86/traps.c
>> --- a/xen/arch/x86/traps.c   Sun Jun 12 22:27:01 2011 +0800
>> +++ b/xen/arch/x86/traps.c   Fri Jul 01 19:04:41 2011 +0800
>> @@ -2113,11 +2113,13 @@ static int emulate_privileged_op(struct
>> 
>>   twobyte_opcode:
>>      /*
>> -     * All 2 and 3 byte opcodes, except RDTSC (0x31) and RDTSCP (0x1,0xF9)
>> -     * are executable only from guest kernel mode (virtual ring 0).
>> +     * All 2 and 3 byte opcodes, except RDTSC (0x31), RDTSCP (0x1,0xF9),
>> +     * and CPUID (0xa2), are executable only from guest kernel mode
>> +     * (virtual ring 0).
>>       */
>>      opcode = insn_fetch(u8, code_base, eip, code_limit);
>> -    if ( !guest_kernel_mode(v, regs) && (opcode != 0x1) && (opcode != 0x31) )
>> +    if ( !guest_kernel_mode(v, regs) &&
>> +        (opcode != 0x1) && (opcode != 0x31) && (opcode != 0xa2) )
>>          goto fail;
>> 
>>      if ( lock && (opcode & ~3) != 0x20 )
>> @@ -2550,6 +2552,10 @@ static int emulate_privileged_op(struct
>>              regs->edx = (uint32_t)(msr_content >> 32);
>>              break;
>>          }
>> +        break;
>> +
>> +    case 0xa2: /* CPUID */
>> +        pv_cpuid(regs);
>>          break;
>> 
>>      default:
>> diff -r 593d51c5f4ee xen/include/asm-x86/cpufeature.h
>> --- a/xen/include/asm-x86/cpufeature.h       Sun Jun 12 22:27:01 2011 +0800
>> +++ b/xen/include/asm-x86/cpufeature.h       Fri Jul 01 19:04:41 2011 +0800
>> @@ -79,6 +79,7 @@
>>  #define X86_FEATURE_ARCH_PERFMON (3*32+11) /* Intel Architectural PerfMon */
>>  #define X86_FEATURE_TSC_RELIABLE (3*32+12) /* TSC is known to be reliable */
>>  #define X86_FEATURE_XTOPOLOGY    (3*32+13) /* cpu topology enum extensions */
>> +#define X86_FEATURE_CPUID_FAULTING (3*32+14) /* cpuid faulting */
>> 
>>  /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
>>  #define X86_FEATURE_XMM3 (4*32+ 0) /* Streaming SIMD Extensions-3 */
>> @@ -175,6 +176,7 @@
>>  #define cpu_has_page1gb         0
>>  #define cpu_has_efer            (boot_cpu_data.x86_capability[1] & 0x20100800)
>>  #define cpu_has_fsgsbase        0
>> +#define cpu_has_cpuid_faulting  0
> 
> Why? I can't see anything in here that would require making this a
> 64-bit-only feature.

OK, will update it for i386 as well.
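
That is, the i386 branch will mirror the x86_64 definition quoted below:

    #define cpu_has_cpuid_faulting  boot_cpu_has(X86_FEATURE_CPUID_FAULTING)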

Thanks,
Jinsong

> 
> Jan
> 
>>  #else /* __x86_64__ */
>>  #define cpu_has_vme         0
>>  #define cpu_has_de          1
>> @@ -201,6 +203,7 @@
>>  #define cpu_has_page1gb             boot_cpu_has(X86_FEATURE_PAGE1GB)
>>  #define cpu_has_efer                1
>>  #define cpu_has_fsgsbase            boot_cpu_has(X86_FEATURE_FSGSBASE)
>> +#define cpu_has_cpuid_faulting      boot_cpu_has(X86_FEATURE_CPUID_FAULTING)
>>  #endif
>> 
>>  #define cpu_has_smep            boot_cpu_has(X86_FEATURE_SMEP)
>> diff -r 593d51c5f4ee xen/include/asm-x86/msr-index.h
>> --- a/xen/include/asm-x86/msr-index.h        Sun Jun 12 22:27:01 2011 +0800
>> +++ b/xen/include/asm-x86/msr-index.h        Fri Jul 01 19:04:41 2011 +0800
>> @@ -155,11 +155,6 @@
>>  #define MSR_P6_PERFCTR1                     0x000000c2
>>  #define MSR_P6_EVNTSEL0                     0x00000186
>>  #define MSR_P6_EVNTSEL1                     0x00000187
>> -
>> -/* MSRs for Intel cpuid feature mask */
>> -#define MSR_INTEL_CPUID_FEATURE_MASK        0x00000478
>> -#define MSR_INTEL_CPUID1_FEATURE_MASK       0x00000130
>> -#define MSR_INTEL_CPUID80000001_FEATURE_MASK 0x00000131
>> 
>>  /* MSRs & bits used for VMX enabling */
>>  #define MSR_IA32_VMX_BASIC                      0x480
>> @@ -492,6 +487,15 @@
>>  #define MSR_CORE_PERF_GLOBAL_CTRL           0x0000038f
>>  #define MSR_CORE_PERF_GLOBAL_OVF_CTRL       0x00000390
>> 
>> +/* Intel cpuid spoofing MSRs */
>> +#define MSR_INTEL_CPUID_FEATURE_MASK        0x00000478
>> +#define MSR_INTEL_CPUID1_FEATURE_MASK       0x00000130
>> +#define MSR_INTEL_CPUID80000001_FEATURE_MASK 0x00000131
>> +
>> +/* Intel cpuid faulting MSRs */
>> +#define MSR_INTEL_PLATFORM_INFO             0x000000ce
>> +#define MSR_INTEL_MISC_FEATURES_ENABLES     0x00000140
>> +
>>  /* Geode defined MSRs */
>>  #define MSR_GEODE_BUSCONT_CONF0             0x00001900
>> 
>> diff -r 593d51c5f4ee xen/include/asm-x86/processor.h
>> --- a/xen/include/asm-x86/processor.h        Sun Jun 12 22:27:01 2011 +0800
>> +++ b/xen/include/asm-x86/processor.h        Fri Jul 01 19:04:41 2011 +0800
>> @@ -192,6 +192,10 @@ extern struct cpuinfo_x86 cpu_data[];
>>  #define cpu_data (&boot_cpu_data)
>>  #define current_cpu_data boot_cpu_data
>>  #endif
>> +
>> +extern DEFINE_PER_CPU(unsigned int, cpuid_faulting_enabled);
>> +
>> +extern void (*cpuid_faulting_flip)(unsigned int enable);
>> 
>>  extern u64 host_pat;
>>  extern int phys_proc_id[NR_CPUS];


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel