[xen master] x86/msr: Implement rdmsr_safe() in C
commit d7c9d05cd1c3a0919e129c10a5b29b3215136552
Author: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
AuthorDate: Fri Apr 18 17:32:38 2025 +0100
Commit: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CommitDate: Fri Aug 22 23:34:07 2025 +0100
x86/msr: Implement rdmsr_safe() in C
... in preparation to be able to use asm goto.
Notably this means that the value parameter must be taken by pointer rather
than by value.
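For illustration, a typical caller-side adjustment looks like this (a
sketch only; MSR_EXAMPLE stands in for whichever MSR index the caller
actually uses):
    uint64_t val;
    /* Before: the macro wrote through an lvalue argument. */
    if ( rdmsr_safe(MSR_EXAMPLE, val) )
        return -ENODEV;
    /* After: the function takes a pointer to the output variable. */
    if ( rdmsr_safe(MSR_EXAMPLE, &val) )
        return -ENODEV;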
No functional change.
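For reference, the kind of asm goto form this prepares for might look as
follows.  This is purely an illustrative sketch, not part of this change,
and assumes a toolchain supporting asm goto with output operands:
    static inline int rdmsr_safe(unsigned int msr, uint64_t *val)
    {
        uint32_t lo, hi;
        /* On #GP, the extable fixup jumps straight to the C label. */
        asm goto ( "1: rdmsr\n\t"
                   _ASM_EXTABLE(1b, %l[fault])
                   : "=a" (lo), "=d" (hi)
                   : "c" (msr)
                   :
                   : fault );
        *val = lo | ((uint64_t)hi << 32);
        return 0;
     fault:
        *val = 0;
        return -EFAULT;
    }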
Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
---
xen/arch/x86/acpi/cpufreq/hwp.c | 14 +++++++-------
xen/arch/x86/apic.c | 2 +-
xen/arch/x86/cpu/amd.c | 25 ++++++++++++-------------
xen/arch/x86/cpu/common.c | 6 +++---
xen/arch/x86/cpu/intel.c | 8 ++++----
xen/arch/x86/cpu/mcheck/mce_amd.c | 2 +-
xen/arch/x86/cpu/mcheck/mce_intel.c | 6 +++---
xen/arch/x86/hvm/ioreq.c | 2 +-
xen/arch/x86/hvm/svm/svm.c | 16 ++++++++--------
xen/arch/x86/hvm/vmx/vmx.c | 4 ++--
xen/arch/x86/include/asm/msr.h | 35 +++++++++++++++++++++--------------
xen/arch/x86/msr.c | 8 ++++----
xen/arch/x86/platform_hypercall.c | 2 +-
xen/arch/x86/pv/emul-priv-op.c | 18 +++++++++---------
xen/arch/x86/spec_ctrl.c | 2 +-
xen/arch/x86/x86_64/mmconfig-shared.c | 2 +-
xen/drivers/passthrough/vtd/iommu.c | 2 +-
17 files changed, 80 insertions(+), 74 deletions(-)
diff --git a/xen/arch/x86/acpi/cpufreq/hwp.c b/xen/arch/x86/acpi/cpufreq/hwp.c
index 36ecb0ed9d..38037d8300 100644
--- a/xen/arch/x86/acpi/cpufreq/hwp.c
+++ b/xen/arch/x86/acpi/cpufreq/hwp.c
@@ -249,7 +249,7 @@ static void cf_check hwp_write_request(void *info)
{
hwp_verbose("CPU%u: error wrmsr_safe(MSR_HWP_REQUEST, %lx)\n",
policy->cpu, hwp_req.raw);
- rdmsr_safe(MSR_HWP_REQUEST, data->curr_req.raw);
+ rdmsr_safe(MSR_HWP_REQUEST, &data->curr_req.raw);
data->ret = -EINVAL;
}
}
@@ -285,7 +285,7 @@ static bool hdc_set_pkg_hdc_ctl(unsigned int cpu, bool val)
{
uint64_t msr;
- if ( rdmsr_safe(MSR_PKG_HDC_CTL, msr) )
+ if ( rdmsr_safe(MSR_PKG_HDC_CTL, &msr) )
{
hwp_err(cpu, "rdmsr_safe(MSR_PKG_HDC_CTL)\n");
return false;
@@ -309,7 +309,7 @@ static bool hdc_set_pm_ctl1(unsigned int cpu, bool val)
{
uint64_t msr;
- if ( rdmsr_safe(MSR_PM_CTL1, msr) )
+ if ( rdmsr_safe(MSR_PM_CTL1, &msr) )
{
hwp_err(cpu, "rdmsr_safe(MSR_PM_CTL1)\n");
return false;
@@ -357,7 +357,7 @@ static void cf_check hwp_init_msrs(void *info)
* Package level MSR, but we don't have a good idea of packages here, so
* just do it everytime.
*/
- if ( rdmsr_safe(MSR_PM_ENABLE, val) )
+ if ( rdmsr_safe(MSR_PM_ENABLE, &val) )
{
hwp_err(policy->cpu, "rdmsr_safe(MSR_PM_ENABLE)\n");
data->curr_req.raw = -1;
@@ -379,13 +379,13 @@ static void cf_check hwp_init_msrs(void *info)
}
}
- if ( rdmsr_safe(MSR_HWP_CAPABILITIES, data->hwp_caps) )
+ if ( rdmsr_safe(MSR_HWP_CAPABILITIES, &data->hwp_caps) )
{
hwp_err(policy->cpu, "rdmsr_safe(MSR_HWP_CAPABILITIES)\n");
goto error;
}
- if ( rdmsr_safe(MSR_HWP_REQUEST, data->curr_req.raw) )
+ if ( rdmsr_safe(MSR_HWP_REQUEST, &data->curr_req.raw) )
{
hwp_err(policy->cpu, "rdmsr_safe(MSR_HWP_REQUEST)\n");
goto error;
@@ -485,7 +485,7 @@ static void cf_check hwp_set_misc_turbo(void *info)
data->ret = 0;
- if ( rdmsr_safe(MSR_IA32_MISC_ENABLE, msr) )
+ if ( rdmsr_safe(MSR_IA32_MISC_ENABLE, &msr) )
{
hwp_verbose("CPU%u: error rdmsr_safe(MSR_IA32_MISC_ENABLE)\n",
policy->cpu);
diff --git a/xen/arch/x86/apic.c b/xen/arch/x86/apic.c
index 0fd8bdba70..cac5ba39e6 100644
--- a/xen/arch/x86/apic.c
+++ b/xen/arch/x86/apic.c
@@ -764,7 +764,7 @@ static int __init detect_init_APIC (void)
if (enable_local_apic < 0)
return -1;
- if ( rdmsr_safe(MSR_APIC_BASE, msr_content) )
+ if ( rdmsr_safe(MSR_APIC_BASE, &msr_content) )
{
printk("No local APIC present\n");
return -1;
diff --git a/xen/arch/x86/cpu/amd.c b/xen/arch/x86/cpu/amd.c
index eb428f284e..567b992a9f 100644
--- a/xen/arch/x86/cpu/amd.c
+++ b/xen/arch/x86/cpu/amd.c
@@ -496,7 +496,7 @@ static void cf_check disable_c1e(void *unused)
* The MSR does not exist in all FamilyF CPUs (only Rev F and above),
* but we safely catch the #GP in that case.
*/
- if ((rdmsr_safe(MSR_K8_ENABLE_C1E, msr_content) == 0) &&
+ if ((rdmsr_safe(MSR_K8_ENABLE_C1E, &msr_content) == 0) &&
(msr_content & (3ULL << 27)) &&
(wrmsr_safe(MSR_K8_ENABLE_C1E, msr_content & ~(3ULL << 27)) != 0))
printk(KERN_ERR "Failed to disable C1E on CPU#%u (%16"PRIx64")\n",
@@ -695,21 +695,21 @@ static void amd_process_freq(const struct cpuinfo_x86 *c,
lo = 0; /* gcc may not recognize the loop having at least 5 iterations */
for (h = c->x86 == 0x10 ? 5 : 8; h--; )
- if (!rdmsr_safe(0xC0010064 + h, lo) && (lo >> 63))
+ if (!rdmsr_safe(0xC0010064 + h, &lo) && (lo >> 63))
break;
if (!(lo >> 63))
return;
if (idx && idx < h &&
- !rdmsr_safe(0xC0010064 + idx, val) && (val >> 63) &&
- !rdmsr_safe(0xC0010064, hi) && (hi >> 63)) {
+ !rdmsr_safe(0xC0010064 + idx, &val) && (val >> 63) &&
+ !rdmsr_safe(0xC0010064, &hi) && (hi >> 63)) {
if (nom_mhz)
*nom_mhz = amd_parse_freq(c->x86, val);
if (low_mhz)
*low_mhz = amd_parse_freq(c->x86, lo);
if (hi_mhz)
*hi_mhz = amd_parse_freq(c->x86, hi);
- } else if (h && !rdmsr_safe(0xC0010064, hi) && (hi >> 63)) {
+ } else if (h && !rdmsr_safe(0xC0010064, &hi) && (hi >> 63)) {
if (low_mhz)
*low_mhz = amd_parse_freq(c->x86, lo);
if (hi_mhz)
@@ -765,7 +765,7 @@ void amd_init_lfence(struct cpuinfo_x86 *c)
* rather than per-thread, so do a full safe read/write/readback cycle
* in the worst case.
*/
- if (rdmsr_safe(MSR_AMD64_DE_CFG, value))
+ if (rdmsr_safe(MSR_AMD64_DE_CFG, &value))
/* Unable to read. Assume the safer default. */
__clear_bit(X86_FEATURE_LFENCE_DISPATCH,
c->x86_capability);
@@ -775,7 +775,7 @@ void amd_init_lfence(struct cpuinfo_x86 *c)
c->x86_capability);
else if (wrmsr_safe(MSR_AMD64_DE_CFG,
value | AMD64_DE_CFG_LFENCE_SERIALISE) ||
- rdmsr_safe(MSR_AMD64_DE_CFG, value) ||
+ rdmsr_safe(MSR_AMD64_DE_CFG, &value) ||
!(value & AMD64_DE_CFG_LFENCE_SERIALISE))
/* Attempt to set failed. Assume the safer default. */
__clear_bit(X86_FEATURE_LFENCE_DISPATCH,
@@ -804,7 +804,7 @@ static bool set_legacy_ssbd(const struct cpuinfo_x86 *c, bool enable)
if (bit >= 0) {
uint64_t val, mask = 1ull << bit;
- if (rdmsr_safe(MSR_AMD64_LS_CFG, val) ||
+ if (rdmsr_safe(MSR_AMD64_LS_CFG, &val) ||
({
val &= ~mask;
if (enable)
@@ -962,7 +962,7 @@ void amd_init_spectral_chicken(void)
if (cpu_has_hypervisor || !is_zen2_uarch())
return;
- if (rdmsr_safe(MSR_AMD64_DE_CFG2, val) == 0 && !(val & chickenbit))
+ if (rdmsr_safe(MSR_AMD64_DE_CFG2, &val) == 0 && !(val & chickenbit))
wrmsr_safe(MSR_AMD64_DE_CFG2, val | chickenbit);
}
@@ -1116,8 +1116,7 @@ static void amd_check_bp_cfg(void)
static void cf_check init_amd(struct cpuinfo_x86 *c)
{
u32 l, h;
-
- unsigned long long value;
+ uint64_t value;
/* Disable TLB flush filter by setting HWCR.FFDIS on K8
* bit 6 of msr C001_0015
@@ -1251,7 +1250,7 @@ static void cf_check init_amd(struct cpuinfo_x86 *c)
if ((c->x86 == 0x15) &&
(c->x86_model >= 0x10) && (c->x86_model <= 0x1f) &&
!cpu_has(c, X86_FEATURE_TOPOEXT) &&
- !rdmsr_safe(MSR_K8_EXT_FEATURE_MASK, value)) {
+ !rdmsr_safe(MSR_K8_EXT_FEATURE_MASK, &value)) {
value |= 1ULL << 54;
wrmsr_safe(MSR_K8_EXT_FEATURE_MASK, value);
rdmsrl(MSR_K8_EXT_FEATURE_MASK, value);
@@ -1267,7 +1266,7 @@ static void cf_check init_amd(struct cpuinfo_x86 *c)
* Disable it on the affected CPUs.
*/
if (c->x86 == 0x15 && c->x86_model >= 0x02 && c->x86_model < 0x20 &&
- !rdmsr_safe(MSR_AMD64_IC_CFG, value) && (value & 0x1e) != 0x1e)
+ !rdmsr_safe(MSR_AMD64_IC_CFG, &value) && (value & 0x1e) != 0x1e)
wrmsr_safe(MSR_AMD64_IC_CFG, value | 0x1e);
amd_get_topology(c);
diff --git a/xen/arch/x86/cpu/common.c b/xen/arch/x86/cpu/common.c
index a66cd5f9c7..530b9eb39a 100644
--- a/xen/arch/x86/cpu/common.c
+++ b/xen/arch/x86/cpu/common.c
@@ -131,14 +131,14 @@ bool __init probe_cpuid_faulting(void)
uint64_t val;
int rc;
- if ((rc = rdmsr_safe(MSR_INTEL_PLATFORM_INFO, val)) == 0)
+ if ((rc = rdmsr_safe(MSR_INTEL_PLATFORM_INFO, &val)) == 0)
raw_cpu_policy.platform_info.cpuid_faulting =
val & MSR_PLATFORM_INFO_CPUID_FAULTING;
if (rc ||
!(val & MSR_PLATFORM_INFO_CPUID_FAULTING) ||
rdmsr_safe(MSR_INTEL_MISC_FEATURES_ENABLES,
- this_cpu(msr_misc_features)))
+ &this_cpu(msr_misc_features)))
{
setup_clear_cpu_cap(X86_FEATURE_CPUID_FAULTING);
return false;
@@ -852,7 +852,7 @@ static void skinit_enable_intr(void)
* If the platform is performing a Secure Launch via SKINIT
* INIT_REDIRECTION flag will be active.
*/
- if ( !cpu_has_skinit || rdmsr_safe(MSR_K8_VM_CR, val) ||
+ if ( !cpu_has_skinit || rdmsr_safe(MSR_K8_VM_CR, &val) ||
!(val & VM_CR_INIT_REDIRECTION) )
return;
diff --git a/xen/arch/x86/cpu/intel.c b/xen/arch/x86/cpu/intel.c
index 29144ffe37..ecca11f04d 100644
--- a/xen/arch/x86/cpu/intel.c
+++ b/xen/arch/x86/cpu/intel.c
@@ -115,7 +115,7 @@ static uint64_t __init _probe_mask_msr(unsigned int *msr, uint64_t caps)
expected_levelling_cap |= caps;
- if (rdmsr_safe(*msr, val) || wrmsr_safe(*msr, val))
+ if (rdmsr_safe(*msr, &val) || wrmsr_safe(*msr, val))
*msr = 0;
else
levelling_caps |= caps;
@@ -546,7 +546,7 @@ static void intel_log_freq(const struct cpuinfo_x86 *c)
{ 26667, 13333, 20000, 16667, 33333, 10000, 40000 };
case 6:
- if ( rdmsr_safe(MSR_INTEL_PLATFORM_INFO, msrval) )
+ if ( rdmsr_safe(MSR_INTEL_PLATFORM_INFO, &msrval) )
return;
max_ratio = msrval >> 8;
min_ratio = msrval >> 40;
@@ -566,7 +566,7 @@ static void intel_log_freq(const struct cpuinfo_x86 *c)
*/
if ( min_ratio > max_ratio )
SWAP(min_ratio, max_ratio);
- if ( rdmsr_safe(MSR_FSB_FREQ, msrval) ||
+ if ( rdmsr_safe(MSR_FSB_FREQ, &msrval) ||
(msrval &= 7) >= ARRAY_SIZE(core_factors) )
return;
factor = core_factors[msrval];
@@ -584,7 +584,7 @@ static void intel_log_freq(const struct cpuinfo_x86 *c)
break;
case 0xf:
- if ( rdmsr_safe(MSR_IA32_EBC_FREQUENCY_ID, msrval) )
+ if ( rdmsr_safe(MSR_IA32_EBC_FREQUENCY_ID, &msrval) )
return;
max_ratio = msrval >> 24;
min_ratio = 0;
diff --git a/xen/arch/x86/cpu/mcheck/mce_amd.c b/xen/arch/x86/cpu/mcheck/mce_amd.c
index 4f06a3153b..25c29eb3d2 100644
--- a/xen/arch/x86/cpu/mcheck/mce_amd.c
+++ b/xen/arch/x86/cpu/mcheck/mce_amd.c
@@ -195,7 +195,7 @@ static void mcequirk_amd_apply(enum mcequirk_amd_flags flags)
break;
case MCEQUIRK_F10_GART:
- if ( rdmsr_safe(MSR_AMD64_MCx_MASK(4), val) == 0 )
+ if ( rdmsr_safe(MSR_AMD64_MCx_MASK(4), &val) == 0 )
wrmsr_safe(MSR_AMD64_MCx_MASK(4), val | (1 << 10));
break;
diff --git a/xen/arch/x86/cpu/mcheck/mce_intel.c b/xen/arch/x86/cpu/mcheck/mce_intel.c
index 1e52b1ac25..fac60f2d37 100644
--- a/xen/arch/x86/cpu/mcheck/mce_intel.c
+++ b/xen/arch/x86/cpu/mcheck/mce_intel.c
@@ -726,7 +726,7 @@ static bool intel_enable_lmce(void)
* MSR_IA32_MCG_EXT_CTL.LMCE_EN.
*/
- if ( rdmsr_safe(MSR_IA32_FEATURE_CONTROL, msr_content) )
+ if ( rdmsr_safe(MSR_IA32_FEATURE_CONTROL, &msr_content) )
return false;
if ( (msr_content & IA32_FEATURE_CONTROL_LOCK) &&
@@ -879,14 +879,14 @@ static void intel_init_ppin(const struct cpuinfo_x86 *c)
case 0x8f: /* Sapphire Rapids X */
if ( (c != &boot_cpu_data && !ppin_msr) ||
- rdmsr_safe(MSR_PPIN_CTL, val) )
+ rdmsr_safe(MSR_PPIN_CTL, &val) )
return;
/* If PPIN is disabled, but not locked, try to enable. */
if ( !(val & (PPIN_ENABLE | PPIN_LOCKOUT)) )
{
wrmsr_safe(MSR_PPIN_CTL, val | PPIN_ENABLE);
- rdmsr_safe(MSR_PPIN_CTL, val);
+ rdmsr_safe(MSR_PPIN_CTL, &val);
}
if ( !(val & PPIN_ENABLE) )
diff --git a/xen/arch/x86/hvm/ioreq.c b/xen/arch/x86/hvm/ioreq.c
index 199cee5b33..a5fa97e149 100644
--- a/xen/arch/x86/hvm/ioreq.c
+++ b/xen/arch/x86/hvm/ioreq.c
@@ -293,7 +293,7 @@ bool arch_ioreq_server_get_type_addr(const struct domain *d,
{
uint64_t msr_val;
- if ( !rdmsr_safe(MSR_AMD64_NB_CFG, msr_val) &&
+ if ( !rdmsr_safe(MSR_AMD64_NB_CFG, &msr_val) &&
(msr_val & (1ULL << AMD64_NB_CFG_CF8_EXT_ENABLE_BIT)) )
*addr |= CF8_ADDR_HI(cf8);
}
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index 7301846c2a..b54f9d9af5 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -1083,8 +1083,8 @@ static void svm_host_osvw_init(void)
{
uint64_t len, status;
- if ( rdmsr_safe(MSR_AMD_OSVW_ID_LENGTH, len) ||
- rdmsr_safe(MSR_AMD_OSVW_STATUS, status) )
+ if ( rdmsr_safe(MSR_AMD_OSVW_ID_LENGTH, &len) ||
+ rdmsr_safe(MSR_AMD_OSVW_STATUS, &status) )
len = status = 0;
if ( len < osvw_length )
@@ -1482,7 +1482,7 @@ static void svm_init_erratum_383(const struct cpuinfo_x86 *c)
return;
/* use safe methods to be compatible with nested virtualization */
- if ( rdmsr_safe(MSR_AMD64_DC_CFG, msr_content) == 0 &&
+ if ( rdmsr_safe(MSR_AMD64_DC_CFG, &msr_content) == 0 &&
wrmsr_safe(MSR_AMD64_DC_CFG, msr_content | (1ULL << 47)) == 0 )
amd_erratum383_found = 1;
else
@@ -1786,7 +1786,7 @@ static int cf_check svm_msr_read_intercept(
break;
case MSR_F10_BU_CFG:
- if ( !rdmsr_safe(msr, *msr_content) )
+ if ( !rdmsr_safe(msr, msr_content) )
break;
if ( boot_cpu_data.x86 == 0xf )
@@ -1805,7 +1805,7 @@ static int cf_check svm_msr_read_intercept(
goto gpf;
case MSR_F10_BU_CFG2:
- if ( rdmsr_safe(msr, *msr_content) )
+ if ( rdmsr_safe(msr, msr_content) )
goto gpf;
break;
@@ -1882,7 +1882,7 @@ static int cf_check svm_msr_read_intercept(
break;
default:
- if ( d->arch.msr_relaxed && !rdmsr_safe(msr, tmp) )
+ if ( d->arch.msr_relaxed && !rdmsr_safe(msr, &tmp) )
{
*msr_content = 0;
break;
@@ -2048,7 +2048,7 @@ static int cf_check svm_msr_write_intercept(
case MSR_F10_BU_CFG:
case MSR_F10_BU_CFG2:
- if ( rdmsr_safe(msr, msr_content) )
+ if ( rdmsr_safe(msr, &msr_content) )
goto gpf;
break;
@@ -2069,7 +2069,7 @@ static int cf_check svm_msr_write_intercept(
break;
default:
- if ( d->arch.msr_relaxed && !rdmsr_safe(msr, msr_content) )
+ if ( d->arch.msr_relaxed && !rdmsr_safe(msr, &msr_content) )
break;
gdprintk(XENLOG_WARNING,
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 5439f950dc..f97a774653 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -3503,7 +3503,7 @@ static int cf_check vmx_msr_read_intercept(
break;
}
- if ( curr->domain->arch.msr_relaxed && !rdmsr_safe(msr, tmp) )
+ if ( curr->domain->arch.msr_relaxed && !rdmsr_safe(msr, &tmp) )
{
*msr_content = 0;
break;
@@ -3813,7 +3813,7 @@ static int cf_check vmx_msr_write_intercept(
is_last_branch_msr(msr) )
break;
- if ( v->domain->arch.msr_relaxed && !rdmsr_safe(msr, msr_content) )
+ if ( v->domain->arch.msr_relaxed && !rdmsr_safe(msr, &msr_content) )
break;
gdprintk(XENLOG_WARNING,
diff --git a/xen/arch/x86/include/asm/msr.h b/xen/arch/x86/include/asm/msr.h
index f5df8698b7..70e6796a45 100644
--- a/xen/arch/x86/include/asm/msr.h
+++ b/xen/arch/x86/include/asm/msr.h
@@ -48,20 +48,27 @@ static inline void wrmsrns(uint32_t msr, uint64_t val)
}
/* rdmsr with exception handling */
-#define rdmsr_safe(msr,val) ({\
- int rc_; \
- uint64_t lo_, hi_; \
- __asm__ __volatile__( \
- "1: rdmsr\n2:\n" \
- ".section .fixup,\"ax\"\n" \
- "3: xorl %k0,%k0\n; xorl %k1,%k1\n" \
- " movl %5,%2\n; jmp 2b\n" \
- ".previous\n" \
- _ASM_EXTABLE(1b, 3b) \
- : "=a" (lo_), "=d" (hi_), "=&r" (rc_) \
- : "c" (msr), "2" (0), "i" (-EFAULT)); \
- val = lo_ | (hi_ << 32); \
- rc_; })
+static inline int rdmsr_safe(unsigned int msr, uint64_t *val)
+{
+ uint64_t lo, hi;
+ int rc;
+
+ asm_inline volatile (
+ "1: rdmsr\n2:\n"
+ ".section .fixup,\"ax\"\n"
+ "3: xorl %k0,%k0\n\t"
+ " xorl %k1,%k1\n\t"
+ " movl %5,%2\n\t"
+ " jmp 2b\n\t"
+ ".previous"
+ _ASM_EXTABLE(1b, 3b)
+ : "=a" (lo), "=d" (hi), "=&r" (rc)
+ : "c" (msr), "2" (0), "i" (-EFAULT) );
+
+ *val = lo | (hi << 32);
+
+ return rc;
+}
/* wrmsr with exception handling */
static inline int wrmsr_safe(unsigned int msr, uint64_t val)
diff --git a/xen/arch/x86/msr.c b/xen/arch/x86/msr.c
index c6465b41ee..ad75a2e108 100644
--- a/xen/arch/x86/msr.c
+++ b/xen/arch/x86/msr.c
@@ -192,7 +192,7 @@ int guest_rdmsr(struct vcpu *v, uint32_t msr, uint64_t *val)
if ( !(cp->x86_vendor & (X86_VENDOR_INTEL | X86_VENDOR_AMD)) ||
!(boot_cpu_data.x86_vendor &
(X86_VENDOR_INTEL | X86_VENDOR_AMD)) ||
- rdmsr_safe(MSR_AMD_PATCHLEVEL, *val) )
+ rdmsr_safe(MSR_AMD_PATCHLEVEL, val) )
goto gp_fault;
break;
@@ -240,7 +240,7 @@ int guest_rdmsr(struct vcpu *v, uint32_t msr, uint64_t *val)
goto gp_fault;
*val = 0;
- if ( likely(!is_cpufreq_controller(d)) || rdmsr_safe(msr, *val) == 0 )
+ if ( likely(!is_cpufreq_controller(d)) || rdmsr_safe(msr, val) == 0 )
break;
goto gp_fault;
@@ -306,7 +306,7 @@ int guest_rdmsr(struct vcpu *v, uint32_t msr, uint64_t *val)
goto gp_fault;
if ( !is_hardware_domain(d) )
return X86EMUL_UNHANDLEABLE;
- if ( rdmsr_safe(msr, *val) )
+ if ( rdmsr_safe(msr, val) )
goto gp_fault;
if ( msr == MSR_K8_SYSCFG )
*val &= (SYSCFG_TOM2_FORCE_WB | SYSCFG_MTRR_TOM2_EN |
@@ -322,7 +322,7 @@ int guest_rdmsr(struct vcpu *v, uint32_t msr, uint64_t *val)
case MSR_FAM10H_MMIO_CONF_BASE:
if ( !is_hardware_domain(d) ||
!(cp->x86_vendor & (X86_VENDOR_AMD | X86_VENDOR_HYGON)) ||
- rdmsr_safe(msr, *val) )
+ rdmsr_safe(msr, val) )
goto gp_fault;
break;
diff --git a/xen/arch/x86/platform_hypercall.c b/xen/arch/x86/platform_hypercall.c
index 3eba791889..21f9f795c1 100644
--- a/xen/arch/x86/platform_hypercall.c
+++ b/xen/arch/x86/platform_hypercall.c
@@ -186,7 +186,7 @@ void cf_check resource_access(void *info)
if ( unlikely(read_tsc) )
local_irq_save(flags);
- ret = rdmsr_safe(entry->idx, entry->val);
+ ret = rdmsr_safe(entry->idx, &entry->val);
if ( unlikely(read_tsc) )
{
diff --git a/xen/arch/x86/pv/emul-priv-op.c b/xen/arch/x86/pv/emul-priv-op.c
index 9f8600f43a..f46aaf2a3b 100644
--- a/xen/arch/x86/pv/emul-priv-op.c
+++ b/xen/arch/x86/pv/emul-priv-op.c
@@ -249,7 +249,7 @@ static bool pci_cfg_ok(struct domain *currd, unsigned int start,
{
uint64_t msr_val;
- if ( rdmsr_safe(MSR_AMD64_NB_CFG, msr_val) )
+ if ( rdmsr_safe(MSR_AMD64_NB_CFG, &msr_val) )
return false;
if ( msr_val & (1ULL << AMD64_NB_CFG_CF8_EXT_ENABLE_BIT) )
start |= CF8_ADDR_HI(currd->arch.pci_cf8);
@@ -962,7 +962,7 @@ static int cf_check read_msr(
return X86EMUL_OKAY;
case MSR_IA32_MISC_ENABLE:
- if ( rdmsr_safe(reg, *val) )
+ if ( rdmsr_safe(reg, val) )
break;
*val = guest_misc_enable(*val);
return X86EMUL_OKAY;
@@ -992,7 +992,7 @@ static int cf_check read_msr(
}
/* fall through */
default:
- if ( currd->arch.msr_relaxed && !rdmsr_safe(reg, tmp) )
+ if ( currd->arch.msr_relaxed && !rdmsr_safe(reg, &tmp) )
{
*val = 0;
return X86EMUL_OKAY;
@@ -1002,14 +1002,14 @@ static int cf_check read_msr(
break;
normal:
- if ( rdmsr_safe(reg, *val) )
+ if ( rdmsr_safe(reg, val) )
break;
return X86EMUL_OKAY;
}
done:
if ( ret != X86EMUL_OKAY && !curr->arch.pv.trap_ctxt[X86_EXC_GP].address &&
- (reg >> 16) != 0x4000 && !rdmsr_safe(reg, tmp) )
+ (reg >> 16) != 0x4000 && !rdmsr_safe(reg, &tmp) )
{
gprintk(XENLOG_WARNING, "faking RDMSR 0x%08x\n", reg);
*val = 0;
@@ -1096,7 +1096,7 @@ static int cf_check write_msr(
case MSR_AMD64_NB_CFG:
if ( !is_hwdom_pinned_vcpu(curr) )
return X86EMUL_OKAY;
- if ( (rdmsr_safe(MSR_AMD64_NB_CFG, temp) != 0) ||
+ if ( (rdmsr_safe(MSR_AMD64_NB_CFG, &temp) != 0) ||
((val ^ temp) & ~(1ULL << AMD64_NB_CFG_CF8_EXT_ENABLE_BIT)) )
goto invalid;
if ( wrmsr_safe(MSR_AMD64_NB_CFG, val) == 0 )
@@ -1109,7 +1109,7 @@ static int cf_check write_msr(
break;
if ( !is_hwdom_pinned_vcpu(curr) )
return X86EMUL_OKAY;
- if ( rdmsr_safe(MSR_FAM10H_MMIO_CONF_BASE, temp) != 0 )
+ if ( rdmsr_safe(MSR_FAM10H_MMIO_CONF_BASE, &temp) != 0 )
break;
if ( (pci_probe & PCI_PROBE_MASK) == PCI_PROBE_MMCONF ?
temp != val :
@@ -1125,7 +1125,7 @@ static int cf_check write_msr(
break;
case MSR_IA32_MISC_ENABLE:
- if ( rdmsr_safe(reg, temp) )
+ if ( rdmsr_safe(reg, &temp) )
break;
if ( val != guest_misc_enable(temp) )
goto invalid;
@@ -1172,7 +1172,7 @@ static int cf_check write_msr(
}
/* fall through */
default:
- if ( currd->arch.msr_relaxed && !rdmsr_safe(reg, val) )
+ if ( currd->arch.msr_relaxed && !rdmsr_safe(reg, &val) )
return X86EMUL_OKAY;
gdprintk(XENLOG_WARNING,
diff --git a/xen/arch/x86/spec_ctrl.c b/xen/arch/x86/spec_ctrl.c
index 1ff3d6835d..e71f62c601 100644
--- a/xen/arch/x86/spec_ctrl.c
+++ b/xen/arch/x86/spec_ctrl.c
@@ -714,7 +714,7 @@ static bool __init check_smt_enabled(void)
*/
if ( boot_cpu_data.vendor == X86_VENDOR_INTEL &&
boot_cpu_data.family != 0xf && !cpu_has_hypervisor &&
- !rdmsr_safe(MSR_INTEL_CORE_THREAD_COUNT, val) )
+ !rdmsr_safe(MSR_INTEL_CORE_THREAD_COUNT, &val) )
return (MASK_EXTR(val, MSR_CTC_CORE_MASK) !=
MASK_EXTR(val, MSR_CTC_THREAD_MASK));
diff --git a/xen/arch/x86/x86_64/mmconfig-shared.c b/xen/arch/x86/x86_64/mmconfig-shared.c
index fbe2676f86..d53da1e2a7 100644
--- a/xen/arch/x86/x86_64/mmconfig-shared.c
+++ b/xen/arch/x86/x86_64/mmconfig-shared.c
@@ -149,7 +149,7 @@ static const char *__init cf_check pci_mmcfg_amd_fam10h(void)
return NULL;
address = MSR_FAM10H_MMIO_CONF_BASE;
- if (rdmsr_safe(address, msr_content))
+ if (rdmsr_safe(address, &msr_content))
return NULL;
/* mmconfig is not enable */
diff --git a/xen/drivers/passthrough/vtd/iommu.c b/xen/drivers/passthrough/vtd/iommu.c
index c55f02c97e..b4105163cc 100644
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -2286,7 +2286,7 @@ static bool __init vtd_ept_page_compatible(const struct vtd_iommu *iommu)
/* EPT is not initialised yet, so we must check the capability in
* the MSR explicitly rather than use cpu_has_vmx_ept_*() */
- if ( rdmsr_safe(MSR_IA32_VMX_EPT_VPID_CAP, ept_cap) != 0 )
+ if ( rdmsr_safe(MSR_IA32_VMX_EPT_VPID_CAP, &ept_cap) != 0 )
return false;
return (ept_has_2mb(ept_cap) && opt_hap_2mb) <=
--
generated by git-patchbot for /home/xen/git/xen.git#master