[Xen-changelog] [xen master] x86/SVM: support data breakpoint extension registers
commit c097f54912d3b06e456f001c34a369e05851650e
Author: Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Thu Apr 24 10:51:21 2014 +0200
Commit: Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Thu Apr 24 10:51:21 2014 +0200
x86/SVM: support data breakpoint extension registers
This leverages the generic MSR save/restore logic introduced a little
while ago.
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
Tested-by: Aravind Gopalakrishnan <aravind.gopalakrishnan@xxxxxxx>
Reviewed-by: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
Reviewed-by: Aravind Gopalakrishnan <aravind.gopalakrishnan@xxxxxxx>
Reviewed-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
Acked-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
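
For context: the DBEXT feature (CPUID 0x80000001:ECX bit 26) adds one
address-mask MSR per debug register. Bits set in a mask are ignored when
the hardware compares an access address against the corresponding DRn,
so a single data breakpoint can watch an aligned range. A minimal,
hypothetical guest-side sketch, not part of the patch (assumes CPL0 and
DBEXT advertised):

/*
 * Hypothetical guest-side illustration (not part of this patch):
 * use DR0's address mask so one data breakpoint covers a 64-byte
 * aligned block.
 */
#include <stdint.h>

#define MSR_AMD64_DR0_ADDRESS_MASK 0xc0011027

static inline void wrmsr(uint32_t msr, uint64_t val)
{
    asm volatile ( "wrmsr" :: "c" (msr),
                   "a" ((uint32_t)val), "d" ((uint32_t)(val >> 32)) );
}

static void watch_64byte_block(uint64_t addr)
{
    /*
     * Set mask bits 5:0: those address bits become don't-cares in the
     * DR0 comparison. Bits 63:32 of the MSR are reserved; the patch
     * below injects #GP if a guest tries to set them.
     */
    wrmsr(MSR_AMD64_DR0_ADDRESS_MASK, 0x3f);
    asm volatile ( "mov %0, %%dr0" :: "r" (addr & ~0x3fUL) );
    /* Enabling the breakpoint via DR7 is omitted for brevity. */
}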
---
tools/libxc/xc_cpufeature.h | 1 +
tools/libxc/xc_cpuid_x86.c | 3 +-
xen/arch/x86/hvm/hvm.c | 3 +
xen/arch/x86/hvm/svm/svm.c | 139 +++++++++++++++++++++++++++++++++++-
xen/include/asm-x86/cpufeature.h | 1 +
xen/include/asm-x86/hvm/svm/vmcb.h | 3 +
xen/include/asm-x86/hvm/vcpu.h | 2 +-
xen/include/asm-x86/msr-index.h | 5 ++
8 files changed, 154 insertions(+), 3 deletions(-)
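
The generic save/restore logic referenced above works roughly as
follows (a simplified reconstruction, not the actual hvm.c code):
init_msr() reports how many MSR slots to reserve in the save record,
save_msr() appends only MSRs with non-zero values, and load_msr() flags
each entry it consumed via the _rsvd field, letting the common code
reject records containing MSRs that no handler claimed:

/*
 * Simplified reconstruction of the common driver code; the real
 * versions live in xen/arch/x86/hvm/hvm.c and differ in detail.
 */
static int hvm_save_cpu_msrs_sketch(struct vcpu *v, struct hvm_msr *ctxt)
{
    ctxt->count = 0;
    if ( hvm_funcs.save_msr )
        hvm_funcs.save_msr(v, ctxt);   /* appends non-zero MSRs only */
    return 0;
}

static int hvm_load_cpu_msrs_sketch(struct vcpu *v, struct hvm_msr *ctxt)
{
    unsigned int i;
    int err = hvm_funcs.load_msr ? hvm_funcs.load_msr(v, ctxt) : 0;

    for ( i = 0; !err && i < ctxt->count; ++i )
        if ( !ctxt->msr[i]._rsvd )     /* entry no handler claimed */
            err = -ENXIO;

    return err;
}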
diff --git a/tools/libxc/xc_cpufeature.h b/tools/libxc/xc_cpufeature.h
index 09b2c82..6fce03b 100644
--- a/tools/libxc/xc_cpufeature.h
+++ b/tools/libxc/xc_cpufeature.h
@@ -125,6 +125,7 @@
#define X86_FEATURE_NODEID_MSR 19 /* NodeId MSR */
#define X86_FEATURE_TBM 21 /* trailing bit manipulations */
#define X86_FEATURE_TOPOEXT 22 /* topology extensions CPUID leafs */
+#define X86_FEATURE_DBEXT 26 /* data breakpoint extension */
/* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx) */
#define X86_FEATURE_FSGSBASE 0 /* {RD,WR}{FS,GS}BASE instructions */
diff --git a/tools/libxc/xc_cpuid_x86.c b/tools/libxc/xc_cpuid_x86.c
index 9264039..71917e3 100644
--- a/tools/libxc/xc_cpuid_x86.c
+++ b/tools/libxc/xc_cpuid_x86.c
@@ -110,9 +110,10 @@ static void amd_xc_cpuid_policy(
bitmaskof(X86_FEATURE_3DNOWPREFETCH) |
bitmaskof(X86_FEATURE_OSVW) |
bitmaskof(X86_FEATURE_XOP) |
+ bitmaskof(X86_FEATURE_LWP) |
bitmaskof(X86_FEATURE_FMA4) |
bitmaskof(X86_FEATURE_TBM) |
- bitmaskof(X86_FEATURE_LWP));
+ bitmaskof(X86_FEATURE_DBEXT));
regs[3] &= (0x0183f3ff | /* features shared with 0x00000001:EDX */
(is_pae ? bitmaskof(X86_FEATURE_NX) : 0) |
(is_64bit ? bitmaskof(X86_FEATURE_LM) : 0) |
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 44fbb69..425316a 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -3088,6 +3088,9 @@ void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
/* Only provide PSE36 when guest runs in 32bit PAE or in long mode */
if ( !(hvm_pae_enabled(v) || hvm_long_mode_enabled(v)) )
*edx &= ~cpufeat_mask(X86_FEATURE_PSE36);
+ /* Hide data breakpoint extensions if the hardware has no support. */
+ if ( !boot_cpu_has(X86_FEATURE_DBEXT) )
+ *ecx &= ~cpufeat_mask(X86_FEATURE_DBEXT);
break;
case 0x80000008:
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index 3fe4b9c..6328e56 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -160,14 +160,28 @@ void svm_intercept_msr(struct vcpu *v, uint32_t msr, int flags)
static void svm_save_dr(struct vcpu *v)
{
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+ unsigned int flag_dr_dirty = v->arch.hvm_vcpu.flag_dr_dirty;
- if ( !v->arch.hvm_vcpu.flag_dr_dirty )
+ if ( !flag_dr_dirty )
return;
/* Clear the DR dirty flag and re-enable intercepts for DR accesses. */
v->arch.hvm_vcpu.flag_dr_dirty = 0;
vmcb_set_dr_intercepts(vmcb, ~0u);
+ if ( flag_dr_dirty & 2 )
+ {
+ svm_intercept_msr(v, MSR_AMD64_DR0_ADDRESS_MASK, MSR_INTERCEPT_RW);
+ svm_intercept_msr(v, MSR_AMD64_DR1_ADDRESS_MASK, MSR_INTERCEPT_RW);
+ svm_intercept_msr(v, MSR_AMD64_DR2_ADDRESS_MASK, MSR_INTERCEPT_RW);
+ svm_intercept_msr(v, MSR_AMD64_DR3_ADDRESS_MASK, MSR_INTERCEPT_RW);
+
+ rdmsrl(MSR_AMD64_DR0_ADDRESS_MASK, v->arch.hvm_svm.dr_mask[0]);
+ rdmsrl(MSR_AMD64_DR1_ADDRESS_MASK, v->arch.hvm_svm.dr_mask[1]);
+ rdmsrl(MSR_AMD64_DR2_ADDRESS_MASK, v->arch.hvm_svm.dr_mask[2]);
+ rdmsrl(MSR_AMD64_DR3_ADDRESS_MASK, v->arch.hvm_svm.dr_mask[3]);
+ }
+
v->arch.debugreg[0] = read_debugreg(0);
v->arch.debugreg[1] = read_debugreg(1);
v->arch.debugreg[2] = read_debugreg(2);
@@ -178,12 +192,32 @@ static void svm_save_dr(struct vcpu *v)
static void __restore_debug_registers(struct vmcb_struct *vmcb, struct vcpu *v)
{
+ unsigned int ecx;
+
if ( v->arch.hvm_vcpu.flag_dr_dirty )
return;
v->arch.hvm_vcpu.flag_dr_dirty = 1;
vmcb_set_dr_intercepts(vmcb, 0);
+ ASSERT(v == current);
+ hvm_cpuid(0x80000001, NULL, NULL, &ecx, NULL);
+ if ( test_bit(X86_FEATURE_DBEXT & 31, &ecx) )
+ {
+ svm_intercept_msr(v, MSR_AMD64_DR0_ADDRESS_MASK, MSR_INTERCEPT_NONE);
+ svm_intercept_msr(v, MSR_AMD64_DR1_ADDRESS_MASK, MSR_INTERCEPT_NONE);
+ svm_intercept_msr(v, MSR_AMD64_DR2_ADDRESS_MASK, MSR_INTERCEPT_NONE);
+ svm_intercept_msr(v, MSR_AMD64_DR3_ADDRESS_MASK, MSR_INTERCEPT_NONE);
+
+ wrmsrl(MSR_AMD64_DR0_ADDRESS_MASK, v->arch.hvm_svm.dr_mask[0]);
+ wrmsrl(MSR_AMD64_DR1_ADDRESS_MASK, v->arch.hvm_svm.dr_mask[1]);
+ wrmsrl(MSR_AMD64_DR2_ADDRESS_MASK, v->arch.hvm_svm.dr_mask[2]);
+ wrmsrl(MSR_AMD64_DR3_ADDRESS_MASK, v->arch.hvm_svm.dr_mask[3]);
+
+ /* Can't use hvm_cpuid() in svm_save_dr(): v != current. */
+ v->arch.hvm_vcpu.flag_dr_dirty |= 2;
+ }
+
write_debugreg(0, v->arch.debugreg[0]);
write_debugreg(1, v->arch.debugreg[1]);
write_debugreg(2, v->arch.debugreg[2]);
@@ -356,6 +390,72 @@ static int svm_load_vmcb_ctxt(struct vcpu *v, struct hvm_hw_cpu *ctxt)
return 0;
}
+static unsigned int __init svm_init_msr(void)
+{
+ return boot_cpu_has(X86_FEATURE_DBEXT) ? 4 : 0;
+}
+
+static void svm_save_msr(struct vcpu *v, struct hvm_msr *ctxt)
+{
+ if ( boot_cpu_has(X86_FEATURE_DBEXT) )
+ {
+ ctxt->msr[ctxt->count].val = v->arch.hvm_svm.dr_mask[0];
+ if ( ctxt->msr[ctxt->count].val )
+ ctxt->msr[ctxt->count++].index = MSR_AMD64_DR0_ADDRESS_MASK;
+
+ ctxt->msr[ctxt->count].val = v->arch.hvm_svm.dr_mask[1];
+ if ( ctxt->msr[ctxt->count].val )
+ ctxt->msr[ctxt->count++].index = MSR_AMD64_DR1_ADDRESS_MASK;
+
+ ctxt->msr[ctxt->count].val = v->arch.hvm_svm.dr_mask[2];
+ if ( ctxt->msr[ctxt->count].val )
+ ctxt->msr[ctxt->count++].index = MSR_AMD64_DR2_ADDRESS_MASK;
+
+ ctxt->msr[ctxt->count].val = v->arch.hvm_svm.dr_mask[3];
+ if ( ctxt->msr[ctxt->count].val )
+ ctxt->msr[ctxt->count++].index = MSR_AMD64_DR3_ADDRESS_MASK;
+ }
+}
+
+static int svm_load_msr(struct vcpu *v, struct hvm_msr *ctxt)
+{
+ unsigned int i, idx;
+ int err = 0;
+
+ for ( i = 0; i < ctxt->count; ++i )
+ {
+ switch ( idx = ctxt->msr[i].index )
+ {
+ case MSR_AMD64_DR0_ADDRESS_MASK:
+ if ( !boot_cpu_has(X86_FEATURE_DBEXT) )
+ err = -ENXIO;
+ else if ( ctxt->msr[i].val >> 32 )
+ err = -EDOM;
+ else
+ v->arch.hvm_svm.dr_mask[0] = ctxt->msr[i].val;
+ break;
+
+ case MSR_AMD64_DR1_ADDRESS_MASK ... MSR_AMD64_DR3_ADDRESS_MASK:
+ if ( !boot_cpu_has(X86_FEATURE_DBEXT) )
+ err = -ENXIO;
+ else if ( ctxt->msr[i].val >> 32 )
+ err = -EDOM;
+ else
+ v->arch.hvm_svm.dr_mask[idx - MSR_AMD64_DR1_ADDRESS_MASK + 1] =
+ ctxt->msr[i].val;
+ break;
+
+ default:
+ continue;
+ }
+ if ( err )
+ break;
+ ctxt->msr[i]._rsvd = 1;
+ }
+
+ return err;
+}
+
static void svm_fpu_enter(struct vcpu *v)
{
struct vmcb_struct *n1vmcb = vcpu_nestedhvm(v).nv_n1vmcx;
@@ -1456,6 +1556,8 @@ static int svm_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
switch ( msr )
{
+ unsigned int ecx;
+
case MSR_IA32_SYSENTER_CS:
*msr_content = v->arch.hvm_svm.guest_sysenter_cs;
break;
@@ -1531,6 +1633,21 @@ static int svm_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
vpmu_do_rdmsr(msr, msr_content);
break;
+ case MSR_AMD64_DR0_ADDRESS_MASK:
+ hvm_cpuid(0x80000001, NULL, NULL, &ecx, NULL);
+ if ( !test_bit(X86_FEATURE_DBEXT & 31, &ecx) )
+ goto gpf;
+ *msr_content = v->arch.hvm_svm.dr_mask[0];
+ break;
+
+ case MSR_AMD64_DR1_ADDRESS_MASK ... MSR_AMD64_DR3_ADDRESS_MASK:
+ hvm_cpuid(0x80000001, NULL, NULL, &ecx, NULL);
+ if ( !test_bit(X86_FEATURE_DBEXT & 31, &ecx) )
+ goto gpf;
+ *msr_content =
+ v->arch.hvm_svm.dr_mask[msr - MSR_AMD64_DR1_ADDRESS_MASK + 1];
+ break;
+
case MSR_AMD_OSVW_ID_LENGTH:
case MSR_AMD_OSVW_STATUS:
ret = svm_handle_osvw(v, msr, msr_content, 1);
@@ -1599,6 +1716,8 @@ static int svm_msr_write_intercept(unsigned int msr, uint64_t msr_content)
switch ( msr )
{
+ unsigned int ecx;
+
case MSR_IA32_SYSENTER_CS:
vmcb->sysenter_cs = v->arch.hvm_svm.guest_sysenter_cs = msr_content;
break;
@@ -1674,6 +1793,21 @@ static int svm_msr_write_intercept(unsigned int msr, uint64_t msr_content)
*/
break;
+ case MSR_AMD64_DR0_ADDRESS_MASK:
+ hvm_cpuid(0x80000001, NULL, NULL, &ecx, NULL);
+ if ( !test_bit(X86_FEATURE_DBEXT & 31, &ecx) || (msr_content >> 32) )
+ goto gpf;
+ v->arch.hvm_svm.dr_mask[0] = msr_content;
+ break;
+
+ case MSR_AMD64_DR1_ADDRESS_MASK ... MSR_AMD64_DR3_ADDRESS_MASK:
+ hvm_cpuid(0x80000001, NULL, NULL, &ecx, NULL);
+ if ( !test_bit(X86_FEATURE_DBEXT & 31, &ecx) || (msr_content >> 32) )
+ goto gpf;
+ v->arch.hvm_svm.dr_mask[msr - MSR_AMD64_DR1_ADDRESS_MASK + 1] =
+ msr_content;
+ break;
+
case MSR_AMD_OSVW_ID_LENGTH:
case MSR_AMD_OSVW_STATUS:
ret = svm_handle_osvw(v, msr, &msr_content, 0);
@@ -2027,6 +2161,9 @@ static struct hvm_function_table __initdata svm_function_table = {
.vcpu_destroy = svm_vcpu_destroy,
.save_cpu_ctxt = svm_save_vmcb_ctxt,
.load_cpu_ctxt = svm_load_vmcb_ctxt,
+ .init_msr = svm_init_msr,
+ .save_msr = svm_save_msr,
+ .load_msr = svm_load_msr,
.get_interrupt_shadow = svm_get_interrupt_shadow,
.set_interrupt_shadow = svm_set_interrupt_shadow,
.guest_x86_mode = svm_guest_x86_mode,
diff --git a/xen/include/asm-x86/cpufeature.h b/xen/include/asm-x86/cpufeature.h
index 0c4d6c1..6a6b1ab 100644
--- a/xen/include/asm-x86/cpufeature.h
+++ b/xen/include/asm-x86/cpufeature.h
@@ -134,6 +134,7 @@
#define X86_FEATURE_NODEID_MSR (6*32+19) /* NodeId MSR */
#define X86_FEATURE_TBM (6*32+21) /* trailing bit manipulations */
#define X86_FEATURE_TOPOEXT (6*32+22) /* topology extensions CPUID leafs */
+#define X86_FEATURE_DBEXT (6*32+26) /* data breakpoint extension */
/* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 7 */
#define X86_FEATURE_FSGSBASE (7*32+ 0) /* {RD,WR}{FS,GS}BASE instructions */
diff --git a/xen/include/asm-x86/hvm/svm/vmcb.h b/xen/include/asm-x86/hvm/svm/vmcb.h
index 9b0c789..fd0b0a4 100644
--- a/xen/include/asm-x86/hvm/svm/vmcb.h
+++ b/xen/include/asm-x86/hvm/svm/vmcb.h
@@ -515,6 +515,9 @@ struct arch_svm_struct {
uint64_t guest_lwp_cfg; /* guest version */
uint64_t cpu_lwp_cfg; /* CPU version */
+ /* data breakpoint extension MSRs */
+ uint32_t dr_mask[4];
+
/* OSVW MSRs */
struct {
u64 length;
diff --git a/xen/include/asm-x86/hvm/vcpu.h b/xen/include/asm-x86/hvm/vcpu.h
index 122ab0d..f34fa91 100644
--- a/xen/include/asm-x86/hvm/vcpu.h
+++ b/xen/include/asm-x86/hvm/vcpu.h
@@ -140,7 +140,7 @@ struct hvm_vcpu {
int xen_port;
- bool_t flag_dr_dirty;
+ u8 flag_dr_dirty;
bool_t debug_state_latch;
bool_t single_step;
diff --git a/xen/include/asm-x86/msr-index.h b/xen/include/asm-x86/msr-index.h
index 7bb69fe..70a8201 100644
--- a/xen/include/asm-x86/msr-index.h
+++ b/xen/include/asm-x86/msr-index.h
@@ -208,6 +208,11 @@
#define MSR_AMD64_DC_CFG 0xc0011022
#define AMD64_NB_CFG_CF8_EXT_ENABLE_BIT 46
+#define MSR_AMD64_DR0_ADDRESS_MASK 0xc0011027
+#define MSR_AMD64_DR1_ADDRESS_MASK 0xc0011019
+#define MSR_AMD64_DR2_ADDRESS_MASK 0xc001101a
+#define MSR_AMD64_DR3_ADDRESS_MASK 0xc001101b
+
/* AMD Family10h machine check MSRs */
#define MSR_F10_MC4_MISC1 0xc0000408
#define MSR_F10_MC4_MISC2 0xc0000409
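
Note the non-contiguous numbering above: DR0's mask MSR (0xc0011027)
sits apart from the DR1-DR3 block (0xc0011019-0xc001101b), which is why
svm.c handles MSR_AMD64_DR0_ADDRESS_MASK separately from the DR1...DR3
range and indexes dr_mask[] as msr - MSR_AMD64_DR1_ADDRESS_MASK + 1.
A hypothetical helper making the mapping explicit:

/* Hypothetical helper (not in the patch): map debug register number
 * n (0..3) to its address-mask MSR, mirroring the arithmetic used in
 * svm_msr_read_intercept()/svm_msr_write_intercept(). */
static inline uint32_t dr_mask_msr(unsigned int n)
{
    return n == 0 ? MSR_AMD64_DR0_ADDRESS_MASK
                  : MSR_AMD64_DR1_ADDRESS_MASK + (n - 1);
}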
--
generated by git-patchbot for /home/xen/git/xen.git#master
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog