
[Xen-devel] [PATCH v4 4/4] x86: make hvm_cpuid() tolerate NULL pointers



Now that other HVM code is starting to make more extensive use of
hvm_cpuid(), let's not force every caller to declare dummy variables
for outputs it doesn't care about.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
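
For illustration, a caller that only needs a single output register can
now pass NULL for the others - a minimal sketch of the pattern the hunks
below switch to (the leaf and feature bit are chosen purely as an example):

    unsigned int edx;
    bool_t mtrr;

    /* Only EDX of CPUID leaf 1 is of interest; the other outputs are discarded. */
    hvm_cpuid(1, NULL, NULL, NULL, &edx);
    mtrr = !!(edx & cpufeat_mask(X86_FEATURE_MTRR));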

--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -2757,7 +2757,17 @@ void hvm_cpuid(unsigned int input, unsig
 {
     struct vcpu *v = current;
     struct domain *d = v->domain;
-    unsigned int count = *ecx;
+    unsigned int count, dummy = 0;
+
+    if ( !eax )
+        eax = &dummy;
+    if ( !ebx )
+        ebx = &dummy;
+    if ( !ecx )
+        ecx = &dummy;
+    count = *ecx;
+    if ( !edx )
+        edx = &dummy;
 
     if ( cpuid_viridian_leaves(input, eax, ebx, ecx, edx) )
         return;
@@ -2765,7 +2775,7 @@ void hvm_cpuid(unsigned int input, unsig
     if ( cpuid_hypervisor_leaves(input, count, eax, ebx, ecx, edx) )
         return;
 
-    domain_cpuid(d, input, *ecx, eax, ebx, ecx, edx);
+    domain_cpuid(d, input, count, eax, ebx, ecx, edx);
 
     switch ( input )
     {
@@ -2853,15 +2863,15 @@ int hvm_msr_read_intercept(unsigned int 
 {
     struct vcpu *v = current;
     uint64_t *var_range_base, *fixed_range_base;
-    int index, mtrr;
-    uint32_t cpuid[4];
+    bool_t mtrr;
+    unsigned int edx, index;
     int ret = X86EMUL_OKAY;
 
     var_range_base = (uint64_t *)v->arch.hvm_vcpu.mtrr.var_ranges;
     fixed_range_base = (uint64_t *)v->arch.hvm_vcpu.mtrr.fixed_ranges;
 
-    hvm_cpuid(1, &cpuid[0], &cpuid[1], &cpuid[2], &cpuid[3]);
-    mtrr = !!(cpuid[3] & cpufeat_mask(X86_FEATURE_MTRR));
+    hvm_cpuid(1, NULL, NULL, NULL, &edx);
+    mtrr = !!(edx & cpufeat_mask(X86_FEATURE_MTRR));
 
     switch ( msr )
     {
@@ -2969,15 +2979,15 @@ int hvm_msr_read_intercept(unsigned int 
 int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content)
 {
     struct vcpu *v = current;
-    int index, mtrr;
-    uint32_t cpuid[4];
+    bool_t mtrr;
+    unsigned int edx, index;
     int ret = X86EMUL_OKAY;
 
     HVMTRACE_3D(MSR_WRITE, msr,
                (uint32_t)msr_content, (uint32_t)(msr_content >> 32));
 
-    hvm_cpuid(1, &cpuid[0], &cpuid[1], &cpuid[2], &cpuid[3]);
-    mtrr = !!(cpuid[3] & cpufeat_mask(X86_FEATURE_MTRR));
+    hvm_cpuid(1, NULL, NULL, NULL, &edx);
+    mtrr = !!(edx & cpufeat_mask(X86_FEATURE_MTRR));
 
     hvm_memory_event_msr(msr, msr_content);
 
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -806,13 +806,13 @@ static inline void svm_lwp_load(struct v
 /* Update LWP_CFG MSR (0xc0000105). Return -1 if error; otherwise returns 0. */
 static int svm_update_lwp_cfg(struct vcpu *v, uint64_t msr_content)
 {
-    unsigned int eax, ebx, ecx, edx;
+    unsigned int edx;
     uint32_t msr_low;
     static uint8_t lwp_intr_vector;
 
     if ( xsave_enabled(v) && cpu_has_lwp )
     {
-        hvm_cpuid(0x8000001c, &eax, &ebx, &ecx, &edx);
+        hvm_cpuid(0x8000001c, NULL, NULL, NULL, &edx);
         msr_low = (uint32_t)msr_content;
         
         /* generate #GP if guest tries to turn on unsupported features. */
@@ -1163,10 +1163,10 @@ static void svm_init_erratum_383(struct 
 
 static int svm_handle_osvw(struct vcpu *v, uint32_t msr, uint64_t *val, bool_t read)
 {
-    uint eax, ebx, ecx, edx;
+    unsigned int ecx;
 
     /* Guest OSVW support */
-    hvm_cpuid(0x80000001, &eax, &ebx, &ecx, &edx);
+    hvm_cpuid(0x80000001, NULL, NULL, &ecx, NULL);
     if ( !test_bit((X86_FEATURE_OSVW & 31), &ecx) )
         return -1;
 
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -1813,7 +1813,7 @@ int nvmx_handle_invvpid(struct cpu_user_
 int nvmx_msr_read_intercept(unsigned int msr, u64 *msr_content)
 {
     struct vcpu *v = current;
-    unsigned int eax, ebx, ecx, edx, dummy;
+    unsigned int eax, ebx, ecx, edx;
     u64 data = 0, host_data = 0;
     int r = 1;
 
@@ -1821,7 +1821,7 @@ int nvmx_msr_read_intercept(unsigned int
         return 0;
 
     /* VMX capablity MSRs are available only when guest supports VMX. */
-    hvm_cpuid(0x1, &dummy, &dummy, &ecx, &edx);
+    hvm_cpuid(0x1, NULL, NULL, &ecx, &edx);
     if ( !(ecx & cpufeat_mask(X86_FEATURE_VMXE)) )
         return 0;
 
@@ -1972,18 +1972,18 @@ int nvmx_msr_read_intercept(unsigned int
         if ( ecx & cpufeat_mask(X86_FEATURE_XSAVE) )
             data |= X86_CR4_OSXSAVE;
 
-        hvm_cpuid(0x0, &eax, &dummy, &dummy, &dummy);
+        hvm_cpuid(0x0, &eax, NULL, NULL, NULL);
         switch ( eax )
         {
         default:
-            hvm_cpuid(0xa, &eax, &dummy, &dummy, &dummy);
+            hvm_cpuid(0xa, &eax, NULL, NULL, NULL);
             /* Check whether guest has the perf monitor feature. */
             if ( (eax & 0xff) && (eax & 0xff00) )
                 data |= X86_CR4_PCE;
             /* fall through */
         case 0x7 ... 0x9:
             ecx = 0;
-            hvm_cpuid(0x7, &dummy, &ebx, &ecx, &dummy);
+            hvm_cpuid(0x7, NULL, &ebx, &ecx, NULL);
             if ( ebx & cpufeat_mask(X86_FEATURE_FSGSBASE) )
                 data |= X86_CR4_FSGSBASE;
             if ( ebx & cpufeat_mask(X86_FEATURE_SMEP) )


Attachment: x86-hvm_cpuid-tolerate-NULL.patch
Description: Text document
