[Xen-devel] [PATCH 1/1] arm/monitor vm-events: implement write-ctrlreg support
This patch adds ARM support for write-ctrlreg monitor vm-events.
The ARM control-registers that can be monitored are:
  - VM_EVENT_ARM_SCTLR: AArch32 SCTLR, AArch64 SCTLR_EL1
  - VM_EVENT_ARM_TTBR{0,1}: AArch32 TTBR{0,1}, AArch64 TTBR{0,1}_EL1
  - VM_EVENT_ARM_TTBCR: AArch32 TTBCR, AArch64 TCR_EL1

Trapping of writes to these registers is achieved by setting the
HCR_EL2.TVM / HCR.TVM bit.

Signed-off-by: Corneliu ZUZU <czuzu@xxxxxxxxxxxxxxx>
---
 xen/arch/arm/p2m.c              |   5 +
 xen/arch/arm/traps.c            | 128 +++++++++++++++++++++-
 xen/arch/x86/hvm/event.c        |  27 -----
 xen/arch/x86/hvm/hvm.c          |   2 +-
 xen/arch/x86/hvm/vmx/vmx.c      |   2 +-
 xen/arch/x86/monitor.c          |  45 --------
 xen/common/monitor.c            |  48 ++++++++
 xen/common/vm_event.c           |  29 +++++
 xen/include/asm-arm/domain.h    |   8 ++
 xen/include/asm-arm/traps.h     | 227 ++++++++++++++++++++++++++++++++++++++++
 xen/include/asm-arm/vm_event.h  |   4 +-
 xen/include/asm-x86/hvm/event.h |  13 +--
 xen/include/asm-x86/monitor.h   |   2 -
 xen/include/public/vm_event.h   |   8 +-
 xen/include/xen/monitor.h       |   2 +
 xen/include/xen/vm_event.h      |   8 ++
 16 files changed, 467 insertions(+), 91 deletions(-)
 create mode 100644 xen/include/asm-arm/traps.h
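Since the WRITE_CTRLREG op is moved into common monitor_domctl() code by this
patch, the existing toolstack helper should cover ARM without changes. For
context, a minimal toolstack-side sketch (an illustration, not part of the
patch): it assumes the current xc_monitor_write_ctrlreg() prototype from
tools/libxc and omits the vm_event ring setup done via xc_monitor_enable():

    /* Hedged sketch: subscribe to guest TTBR0 writes for a domain.
     * Assumes xc_monitor_write_ctrlreg() keeps its existing prototype;
     * ring setup/teardown is omitted for brevity. */
    #include <stdbool.h>
    #include <stdio.h>
    #include <xenctrl.h>

    static int enable_ttbr0_monitoring(xc_interface *xch, domid_t domid)
    {
        /* index:        VM_EVENT_ARM_TTBR0 (new in this patch)
         * sync:         pause the vCPU until the monitor responds
         * onchangeonly: skip writes that do not change the value */
        int rc = xc_monitor_write_ctrlreg(xch, domid, VM_EVENT_ARM_TTBR0,
                                          true /* enable */, true /* sync */,
                                          true /* onchangeonly */);
        if ( rc )
            fprintf(stderr, "enabling write-ctrlreg monitor failed: %d\n", rc);
        return rc;
    }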
diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
index a2a9c4b..a32dfdd 100644
--- a/xen/arch/arm/p2m.c
+++ b/xen/arch/arm/p2m.c
@@ -108,6 +108,11 @@ void p2m_restore_state(struct vcpu *n)
     else
         hcr |= HCR_RW;
 
+    if ( likely(0 == n->domain->arch.monitor.write_ctrlreg_enabled) )
+        hcr &= ~HCR_TVM;
+    else
+        hcr |= HCR_TVM;
+
     WRITE_SYSREG(n->arch.sctlr, SCTLR_EL1);
     isb();
 
diff --git a/xen/arch/arm/traps.c b/xen/arch/arm/traps.c
index 83744e8..3e1c8ee 100644
--- a/xen/arch/arm/traps.c
+++ b/xen/arch/arm/traps.c
@@ -31,8 +31,10 @@
 #include <xen/softirq.h>
 #include <xen/domain_page.h>
 #include <xen/perfc.h>
+#include <xen/vm_event.h>
 #include <public/sched.h>
 #include <public/xen.h>
+#include <public/vm_event.h>
 #include <asm/debugger.h>
 #include <asm/event.h>
 #include <asm/regs.h>
@@ -41,6 +43,7 @@
 #include <asm/mmio.h>
 #include <asm/cpufeature.h>
 #include <asm/flushtlb.h>
+#include <asm/traps.h>
 
 #include "decode.h"
 #include "vtimer.h"
@@ -122,7 +125,12 @@ void init_traps(void)
     WRITE_SYSREG((HCPTR_CP_MASK & ~(HCPTR_CP(10) | HCPTR_CP(11))) | HCPTR_TTA,
                  CPTR_EL2);
 
-    /* Setup hypervisor traps */
+    /*
+     * Setup hypervisor traps.
+     * Note: HCR_TVM is also set for system-register write monitoring
+     * purposes (see vm_event_monitor_cr), but for performance reasons that
+     * is done selectively, on the scheduling tail (see p2m_restore_state).
+     */
     WRITE_SYSREG(HCR_PTW|HCR_BSU_INNER|HCR_AMO|HCR_IMO|HCR_FMO|HCR_VM|
                  HCR_TWE|HCR_TWI|HCR_TSC|HCR_TAC|HCR_SWIO|HCR_TIDCP, HCR_EL2);
     isb();
@@ -398,7 +406,6 @@ static void inject_abt32_exception(struct cpu_user_regs *regs,
             far |= addr << 32;
         WRITE_SYSREG(far, FAR_EL1);
         WRITE_SYSREG(fsr, IFSR32_EL2);
-
 #endif
     }
     else
@@ -1686,6 +1693,61 @@ static void do_cp15_32(struct cpu_user_regs *regs,
     switch ( hsr.bits & HSR_CP32_REGS_MASK )
     {
     /*
+     * HCR_EL2.TVM / HCR.TVM
+     *
+     * ARMv7 (DDI 0406C.b): B1.14.13
+     * ARMv8 (DDI 0487A.e): D1-1569 Table D1-34
+     */
+    case HSR_CPREG32(SCTLR):
+        TVM_EMUL_VMEVT(regs, hsr, *r, VM_EVENT_ARM_SCTLR, SCTLR);
+        break;
+    case HSR_CPREG32(TTBR0_32):
+        TVM_EMUL_VMEVT(regs, hsr, *r, VM_EVENT_ARM_TTBR0, TTBR0_32);
+        break;
+    case HSR_CPREG32(TTBR1_32):
+        TVM_EMUL_VMEVT(regs, hsr, *r, VM_EVENT_ARM_TTBR1, TTBR1_32);
+        break;
+    case HSR_CPREG32(TTBCR):
+        TVM_EMUL_VMEVT(regs, hsr, *r, VM_EVENT_ARM_TTBCR, TTBCR);
+        break;
+    case HSR_CPREG32(DACR):
+        TVM_EMUL(regs, hsr, *r, DACR);
+        break;
+    case HSR_CPREG32(DFSR):
+        TVM_EMUL(regs, hsr, *r, DFSR);
+        break;
+    case HSR_CPREG32(IFSR):
+        TVM_EMUL(regs, hsr, *r, IFSR);
+        break;
+    case HSR_CPREG32(DFAR):
+        TVM_EMUL(regs, hsr, *r, DFAR);
+        break;
+    case HSR_CPREG32(IFAR):
+        TVM_EMUL(regs, hsr, *r, IFAR);
+        break;
+    case HSR_CPREG32(ADFSR):
+        TVM_EMUL(regs, hsr, *r, ADFSR);
+        break;
+    case HSR_CPREG32(AIFSR):
+        TVM_EMUL(regs, hsr, *r, AIFSR);
+        break;
+    case HSR_CPREG32(MAIR0):
+        TVM_EMUL(regs, hsr, *r, MAIR0);
+        break;
+    case HSR_CPREG32(MAIR1):
+        TVM_EMUL(regs, hsr, *r, MAIR1);
+        break;
+    case HSR_CPREG32(AMAIR0):
+        TVM_EMUL(regs, hsr, *r, AMAIR0);
+        break;
+    case HSR_CPREG32(AMAIR1):
+        TVM_EMUL(regs, hsr, *r, AMAIR1);
+        break;
+    case HSR_CPREG32(CONTEXTIDR):
+        TVM_EMUL(regs, hsr, *r, CONTEXTIDR);
+        break;
+
+    /*
      * !CNTHCTL_EL2.EL1PCEN / !CNTHCTL.PL1PCEN
      *
      * ARMv7 (DDI 0406C.b): B4.1.22
@@ -1809,6 +1871,14 @@ static void do_cp15_32(struct cpu_user_regs *regs,
 static void do_cp15_64(struct cpu_user_regs *regs,
                        const union hsr hsr)
 {
+    const struct hsr_cp64 cp64 = hsr.cp64;
+    register_t *r1 = select_user_reg(regs, cp64.reg1);
+    register_t *r2 = select_user_reg(regs, cp64.reg2);
+    sysreg64_t sr64 = {
+        .l = (uint32_t) *r1,
+        .h = (uint32_t) *r2
+    };
+
     if ( !check_conditional_instr(regs, hsr) )
     {
         advance_pc(regs, hsr);
@@ -1818,6 +1888,19 @@ static void do_cp15_64(struct cpu_user_regs *regs,
     switch ( hsr.bits & HSR_CP64_REGS_MASK )
     {
     /*
+     * HCR_EL2.TVM / HCR.TVM
+     *
+     * ARMv7 (DDI 0406C.b): B1.14.13
+     * ARMv8 (DDI 0487A.e): D1-1569 Table D1-34
+     */
+    case HSR_CPREG64(TTBR0):
+        TVM_EMUL_VMEVT(regs, hsr, sr64.v, VM_EVENT_ARM_TTBR0, TTBR0_64);
+        break;
+    case HSR_CPREG64(TTBR1):
+        TVM_EMUL_VMEVT(regs, hsr, sr64.v, VM_EVENT_ARM_TTBR1, TTBR1_64);
+        break;
+
+    /*
      * !CNTHCTL_EL2.EL1PCEN / !CNTHCTL.PL1PCEN
      *
      * ARMv7 (DDI 0406C.b): B4.1.22
@@ -1847,8 +1930,6 @@ static void do_cp15_64(struct cpu_user_regs *regs,
      */
     default:
     {
-        const struct hsr_cp64 cp64 = hsr.cp64;
-
         gdprintk(XENLOG_ERR,
                  "%s p15, %d, r%d, r%d, cr%d @ 0x%"PRIregister"\n",
                  cp64.read ? "mrrc" : "mcrr",
@@ -2082,6 +2163,45 @@ static void do_sysreg(struct cpu_user_regs *regs,
     switch ( hsr.bits & HSR_SYSREG_REGS_MASK )
     {
     /*
+     * HCR_EL2.TVM
+     *
+     * ARMv8 (DDI 0487A.e): D1-1569 Table D1-34
+     */
+    case HSR_SYSREG_SCTLR_EL1:
+        TVM_EMUL_VMEVT(regs, hsr, *x, VM_EVENT_ARM_SCTLR, SCTLR_EL1);
+        break;
+    case HSR_SYSREG_TTBR0_EL1:
+        TVM_EMUL_VMEVT(regs, hsr, *x, VM_EVENT_ARM_TTBR0, TTBR0_EL1);
+        break;
+    case HSR_SYSREG_TTBR1_EL1:
+        TVM_EMUL_VMEVT(regs, hsr, *x, VM_EVENT_ARM_TTBR1, TTBR1_EL1);
+        break;
+    case HSR_SYSREG_TCR_EL1:
+        TVM_EMUL_VMEVT(regs, hsr, *x, VM_EVENT_ARM_TTBCR, TCR_EL1);
+        break;
+    case HSR_SYSREG_ESR_EL1:
+        TVM_EMUL(regs, hsr, *x, ESR_EL1);
+        break;
+    case HSR_SYSREG_FAR_EL1:
+        TVM_EMUL(regs, hsr, *x, FAR_EL1);
+        break;
+    case HSR_SYSREG_AFSR0_EL1:
+        TVM_EMUL(regs, hsr, *x, AFSR0_EL1);
+        break;
+    case HSR_SYSREG_AFSR1_EL1:
+        TVM_EMUL(regs, hsr, *x, AFSR1_EL1);
+        break;
+    case HSR_SYSREG_MAIR_EL1:
+        TVM_EMUL(regs, hsr, *x, MAIR_EL1);
+        break;
+    case HSR_SYSREG_AMAIR_EL1:
+        TVM_EMUL(regs, hsr, *x, AMAIR_EL1);
+        break;
+    case HSR_SYSREG_CONTEXTIDR_EL1:
+        TVM_EMUL(regs, hsr, *x, CONTEXTIDR_EL1);
+        break;
+
+    /*
      * HCR_EL2.TACR
      *
      * ARMv8 (DDI 0487A.d): D7.2.1
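To illustrate what gets trapped here: once HCR.TVM is set on the scheduling
tail, an EL1/PL1 write to any of the registers above exits to these handlers
instead of taking effect directly. A hypothetical AArch32 guest-side trigger
(illustration only, shown as C inline assembly; 64-bit TTBR writes via MCRR
land in do_cp15_64() correspondingly):

    /* Hypothetical guest code: with HCR.TVM set by Xen, this PL1 write to
     * the 32-bit TTBR0 traps into do_cp15_32() (case HSR_CPREG32(TTBR0_32)),
     * which emulates the write and may emit a VM_EVENT_ARM_TTBR0 vm-event
     * before the guest is resumed. */
    static inline void guest_write_ttbr0(uint32_t val)
    {
        /* MCR p15, 0, <Rt>, c2, c0, 0 == write 32-bit TTBR0 */
        asm volatile("mcr p15, 0, %0, c2, c0, 0" : : "r" (val) : "memory");
    }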
"mrrc" : "mcrr", @@ -2082,6 +2163,45 @@ static void do_sysreg(struct cpu_user_regs *regs, switch ( hsr.bits & HSR_SYSREG_REGS_MASK ) { /* + * HCR_EL2.TVM + * + * ARMv8 (DDI 0487A.e): D1-1569 Table D1-34 + */ + case HSR_SYSREG_SCTLR_EL1: + TVM_EMUL_VMEVT(regs, hsr, *x, VM_EVENT_ARM_SCTLR, SCTLR_EL1); + break; + case HSR_SYSREG_TTBR0_EL1: + TVM_EMUL_VMEVT(regs, hsr, *x, VM_EVENT_ARM_TTBR0, TTBR0_EL1); + break; + case HSR_SYSREG_TTBR1_EL1: + TVM_EMUL_VMEVT(regs, hsr, *x, VM_EVENT_ARM_TTBR1, TTBR1_EL1); + break; + case HSR_SYSREG_TCR_EL1: + TVM_EMUL_VMEVT(regs, hsr, *x, VM_EVENT_ARM_TTBCR, TCR_EL1); + break; + case HSR_SYSREG_ESR_EL1: + TVM_EMUL(regs, hsr, *x, ESR_EL1); + break; + case HSR_SYSREG_FAR_EL1: + TVM_EMUL(regs, hsr, *x, FAR_EL1); + break; + case HSR_SYSREG_AFSR0_EL1: + TVM_EMUL(regs, hsr, *x, AFSR0_EL1); + break; + case HSR_SYSREG_AFSR1_EL1: + TVM_EMUL(regs, hsr, *x, AFSR1_EL1); + break; + case HSR_SYSREG_MAIR_EL1: + TVM_EMUL(regs, hsr, *x, MAIR_EL1); + break; + case HSR_SYSREG_AMAIR_EL1: + TVM_EMUL(regs, hsr, *x, AMAIR_EL1); + break; + case HSR_SYSREG_CONTEXTIDR_EL1: + TVM_EMUL(regs, hsr, *x, CONTEXTIDR_EL1); + break; + + /* * HCR_EL2.TACR * * ARMv8 (DDI 0487A.d): D7.2.1 diff --git a/xen/arch/x86/hvm/event.c b/xen/arch/x86/hvm/event.c index 56c5514..f6be80f 100644 --- a/xen/arch/x86/hvm/event.c +++ b/xen/arch/x86/hvm/event.c @@ -27,33 +27,6 @@ #include <asm/vm_event.h> #include <public/vm_event.h> -bool_t hvm_event_cr(unsigned int index, unsigned long value, unsigned long old) -{ - struct vcpu *curr = current; - struct arch_domain *ad = &curr->domain->arch; - unsigned int ctrlreg_bitmask = monitor_ctrlreg_bitmask(index); - - if ( (ad->monitor.write_ctrlreg_enabled & ctrlreg_bitmask) && - (!(ad->monitor.write_ctrlreg_onchangeonly & ctrlreg_bitmask) || - value != old) ) - { - bool_t sync = !!(ad->monitor.write_ctrlreg_sync & ctrlreg_bitmask); - - vm_event_request_t req = { - .reason = VM_EVENT_REASON_WRITE_CTRLREG, - .vcpu_id = curr->vcpu_id, - .u.write_ctrlreg.index = index, - .u.write_ctrlreg.new_value = value, - .u.write_ctrlreg.old_value = old - }; - - vm_event_monitor_traps(curr, sync, &req); - return 1; - } - - return 0; -} - void hvm_event_msr(unsigned int msr, uint64_t value) { struct vcpu *curr = current; diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c index 5bc2812..fc8cf12 100644 --- a/xen/arch/x86/hvm/hvm.c +++ b/xen/arch/x86/hvm/hvm.c @@ -37,6 +37,7 @@ #include <xen/mem_access.h> #include <xen/rangeset.h> #include <xen/vm_event.h> +#include <xen/monitor.h> #include <asm/shadow.h> #include <asm/hap.h> #include <asm/current.h> @@ -52,7 +53,6 @@ #include <asm/traps.h> #include <asm/mc146818rtc.h> #include <asm/mce.h> -#include <asm/monitor.h> #include <asm/hvm/hvm.h> #include <asm/hvm/vpt.h> #include <asm/hvm/support.h> diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c index 9c5a388..b5cba00 100644 --- a/xen/arch/x86/hvm/vmx/vmx.c +++ b/xen/arch/x86/hvm/vmx/vmx.c @@ -25,6 +25,7 @@ #include <xen/domain_page.h> #include <xen/hypercall.h> #include <xen/perfc.h> +#include <xen/monitor.h> #include <asm/current.h> #include <asm/io.h> #include <asm/iocap.h> @@ -57,7 +58,6 @@ #include <asm/hvm/nestedhvm.h> #include <asm/altp2m.h> #include <asm/event.h> -#include <asm/monitor.h> #include <public/arch-x86/cpuid.h> static bool_t __initdata opt_force_ept; diff --git a/xen/arch/x86/monitor.c b/xen/arch/x86/monitor.c index 1fec412..2f66f27 100644 --- a/xen/arch/x86/monitor.c +++ b/xen/arch/x86/monitor.c @@ -30,51 +30,6 @@ int 
diff --git a/xen/common/monitor.c b/xen/common/monitor.c
index d950a7c..e1e51a1 100644
--- a/xen/common/monitor.c
+++ b/xen/common/monitor.c
@@ -77,6 +77,54 @@ int monitor_domctl(struct domain *d, struct xen_domctl_monitor_op *mop)
         break;
     }
 
+    case XEN_DOMCTL_MONITOR_EVENT_WRITE_CTRLREG:
+    {
+        struct arch_domain *ad = &d->arch;
+        unsigned int ctrlreg_bitmask;
+        bool_t old_status;
+
+        /* sanity check: avoid left-shift undefined behavior */
+        if ( unlikely(mop->u.mov_to_cr.index > 31) )
+            return -EINVAL;
+
+        ctrlreg_bitmask = monitor_ctrlreg_bitmask(mop->u.mov_to_cr.index);
+        old_status = !!(ad->monitor.write_ctrlreg_enabled & ctrlreg_bitmask);
+
+        if ( unlikely(old_status == requested_status) )
+            return -EEXIST;
+
+        domain_pause(d);
+
+        if ( mop->u.mov_to_cr.sync )
+            ad->monitor.write_ctrlreg_sync |= ctrlreg_bitmask;
+        else
+            ad->monitor.write_ctrlreg_sync &= ~ctrlreg_bitmask;
+
+        if ( mop->u.mov_to_cr.onchangeonly )
+            ad->monitor.write_ctrlreg_onchangeonly |= ctrlreg_bitmask;
+        else
+            ad->monitor.write_ctrlreg_onchangeonly &= ~ctrlreg_bitmask;
+
+        if ( requested_status )
+            ad->monitor.write_ctrlreg_enabled |= ctrlreg_bitmask;
+        else
+            ad->monitor.write_ctrlreg_enabled &= ~ctrlreg_bitmask;
+
+#ifdef CONFIG_X86
+        if ( VM_EVENT_X86_CR3 == mop->u.mov_to_cr.index )
+        {
+            struct vcpu *v;
+            /* Latches new CR3 mask through CR0 code. */
+            for_each_vcpu ( d, v )
+                hvm_update_guest_cr(v, 0);
+        }
+#endif
+
+        domain_unpause(d);
+
+        break;
+    }
+
     default:
         /* Give arch-side the chance to handle this event */
         return arch_monitor_domctl_event(d, mop);
diff --git a/xen/common/vm_event.c b/xen/common/vm_event.c
index 2906407..63c87e5 100644
--- a/xen/common/vm_event.c
+++ b/xen/common/vm_event.c
@@ -25,6 +25,7 @@
 #include <xen/wait.h>
 #include <xen/vm_event.h>
 #include <xen/mem_access.h>
+#include <xen/monitor.h>
 #include <asm/p2m.h>
 #include <asm/altp2m.h>
 #include <asm/vm_event.h>
@@ -824,6 +825,34 @@ int vm_event_monitor_traps(struct vcpu *v, uint8_t sync,
     return 1;
 }
 
+bool_t vm_event_monitor_cr(unsigned int index, unsigned long value,
+                           unsigned long old)
+{
+    struct vcpu *curr = current;
+    struct arch_domain *ad = &curr->domain->arch;
+    unsigned int ctrlreg_bitmask = monitor_ctrlreg_bitmask(index);
+
+    if ( (ad->monitor.write_ctrlreg_enabled & ctrlreg_bitmask) &&
+         (!(ad->monitor.write_ctrlreg_onchangeonly & ctrlreg_bitmask) ||
+          value != old) )
+    {
+        bool_t sync = !!(ad->monitor.write_ctrlreg_sync & ctrlreg_bitmask);
+
+        vm_event_request_t req = {
+            .reason = VM_EVENT_REASON_WRITE_CTRLREG,
+            .vcpu_id = curr->vcpu_id,
+            .u.write_ctrlreg.index = index,
+            .u.write_ctrlreg.new_value = value,
+            .u.write_ctrlreg.old_value = old
+        };
+
+        vm_event_monitor_traps(curr, sync, &req);
+        return 1;
+    }
+
+    return 0;
+}
+
 void vm_event_monitor_guest_request(void)
 {
     struct vcpu *curr = current;
diff --git a/xen/include/asm-arm/domain.h b/xen/include/asm-arm/domain.h
index c35ed40..53c1d5b 100644
--- a/xen/include/asm-arm/domain.h
+++ b/xen/include/asm-arm/domain.h
@@ -129,6 +129,14 @@ struct arch_domain
     paddr_t efi_acpi_gpa;
     paddr_t efi_acpi_len;
 #endif
+
+    /* Arch-specific monitor options */
+    struct {
+        unsigned int write_ctrlreg_enabled      : 4;
+        unsigned int write_ctrlreg_sync         : 4;
+        unsigned int write_ctrlreg_onchangeonly : 4;
+    } monitor;
+
 }  __cacheline_aligned;
 
 struct arch_vcpu
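On the consumer side nothing ARM-specific is needed beyond the new index
values; request handling mirrors x86. A sketch of the per-request logic
(illustration only, loosely modeled on tools/tests/xen-access; ring mapping,
polling and response queuing omitted):

    /* Hedged sketch, not part of this patch: decode a write-ctrlreg
     * request read from the vm_event ring and prepare the response. */
    #include <inttypes.h>
    #include <stdio.h>
    #include <xenctrl.h>        /* pulls in the public vm_event types */

    static void handle_req(const vm_event_request_t *req,
                           vm_event_response_t *rsp)
    {
        rsp->version = VM_EVENT_INTERFACE_VERSION;
        rsp->vcpu_id = req->vcpu_id;
        rsp->reason  = req->reason;
        /* Unpause the vCPU iff the event was delivered synchronously. */
        rsp->flags   = req->flags & VM_EVENT_FLAG_VCPU_PAUSED;

        if ( req->reason == VM_EVENT_REASON_WRITE_CTRLREG &&
             req->u.write_ctrlreg.index == VM_EVENT_ARM_TTBR0 )
            printf("vcpu%" PRIu32 ": TTBR0 0x%" PRIx64 " -> 0x%" PRIx64 "\n",
                   req->vcpu_id, req->u.write_ctrlreg.old_value,
                   req->u.write_ctrlreg.new_value);
    }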
diff --git a/xen/include/asm-arm/traps.h b/xen/include/asm-arm/traps.h
new file mode 100644
index 0000000..8f2b06b
--- /dev/null
+++ b/xen/include/asm-arm/traps.h
@@ -0,0 +1,227 @@
+/*
+ * include/asm-arm/traps.h
+ *
+ * ARM Trap handlers
+ *
+ * Copyright (c) 2016 BitDefender S.R.L.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ASM_ARM_TRAPS_H__
+#define __ASM_ARM_TRAPS_H__
+
+#include <xen/vm_event.h>
+#include <asm/regs.h>
+
+/* used to force expansion of args before calling macro */
+#define CALL_MACRO(macro, args...) macro(args)
+
+/* used for easy manipulation of low/high 32-bits of 64-bit system registers */
+typedef union {
+    uint64_t v;
+    struct {
+        uint32_t l;
+        uint32_t h;
+    };
+} sysreg64_t;
+
+#ifdef CONFIG_ARM_64
+
+/*
+ * Emulation of system-register trapped writes that do not cause
+ * VM_EVENT_REASON_WRITE_CTRLREG monitor vm-events.
+ * Such writes are collaterally trapped due to setting the HCR_EL2.TVM bit.
+ *
+ * Regarding AArch32 domains, note that from Xen's perspective system-registers
+ * of such domains are architecturally-mapped to AArch64 registers in one of
+ * three ways:
+ *   - low 32-bits mapping (e.g. AArch32 DFAR -> AArch64 FAR_EL1[31:0])
+ *   - high 32-bits mapping (e.g. AArch32 IFAR -> AArch64 FAR_EL1[63:32])
+ *   - full mapping (e.g. AArch32 SCTLR -> AArch64 SCTLR_EL1)
+ *
+ * Hence we define 2 macro variants:
+ *   - TVM_EMUL_SZ variant, for full mappings
+ *   - TVM_EMUL_LH variant, for low/high 32-bits mappings
+ */
+#define TVM_EMUL_SZ(regs, hsr, val, sz, r)                              \
+{                                                                       \
+    if ( psr_mode_is_user(regs) )                                       \
+        return inject_undef_exception(regs, hsr);                       \
+    WRITE_SYSREG##sz((uint##sz##_t) (val), r);                          \
+}
+#define TVM_EMUL_LH(regs, hsr, val, l_or_h, r)                          \
+{                                                                       \
+    sysreg64_t _new;                                                    \
+    if ( psr_mode_is_user(regs) )                                       \
+        return inject_undef_exception(regs, hsr);                       \
+    _new.v = READ_SYSREG64(r);                                          \
+    _new.l_or_h = (uint32_t) (val);                                     \
+    WRITE_SYSREG64(_new.v, r);                                          \
+}
+
+/*
+ * Emulation of system-register writes that might cause
+ * VM_EVENT_REASON_WRITE_CTRLREG monitor vm-events.
+ * SZ/LH variants, reasoning is the same as above.
+ */
+#define TVM_EMUL_SZ_VMEVT(regs, hsr, val, vmevt_r, sz, r)               \
+{                                                                       \
+    unsigned long _old;                                                 \
+    if ( psr_mode_is_user(regs) )                                       \
+        return inject_undef_exception(regs, hsr);                       \
+    _old = (unsigned long) READ_SYSREG##sz(r);                          \
+    WRITE_SYSREG##sz((uint##sz##_t) (val), r);                          \
+    vm_event_monitor_cr(vmevt_r,                                        \
+                        (unsigned long) (uint##sz##_t) (val),           \
+                        _old);                                          \
+}
+#define TVM_EMUL_LH_VMEVT(regs, hsr, val, vmevt_r, l_or_h, r)           \
+{                                                                       \
+    sysreg64_t _old, _new;                                              \
+    if ( psr_mode_is_user(regs) )                                       \
+        return inject_undef_exception(regs, hsr);                       \
+    _new.v = (_old.v = READ_SYSREG64(r));                               \
+    _new.l_or_h = (uint32_t) (val);                                     \
+    WRITE_SYSREG64(_new.v, r);                                          \
+    vm_event_monitor_cr(vmevt_r,                                        \
+                        (unsigned long) _new.v,                         \
+                        (unsigned long) _old.v);                        \
+}
+
+#define PART_FULL32 SZ,32   /* SZ variant, equivalent 32-bit counterpart */
+#define PART_FULL64 SZ,64   /* SZ variant, equivalent 64-bit counterpart */
+#define PART_LOW    LH,l    /* LH variant, low 32-bits (sysreg64.l) */
+#define PART_HIGH   LH,h    /* LH variant, high 32-bits (sysreg64.h) */
+
+/*
+ * HCR_EL2.TVM trapped registers info (size in bits) for an AArch64 domain.
+ *
+ * ARMv8 (DDI 0487A.e): D1-1569 Table D1-34 (traps from AArch64 state)
+ */
+#define TVMINF_SCTLR_EL1        PART_FULL32, SCTLR_EL1
+#define TVMINF_TTBR0_EL1        PART_FULL64, TTBR0_EL1
+#define TVMINF_TTBR1_EL1        PART_FULL64, TTBR1_EL1
+#define TVMINF_TCR_EL1          PART_FULL64, TCR_EL1
+#define TVMINF_ESR_EL1          PART_FULL32, ESR_EL1
+#define TVMINF_FAR_EL1          PART_FULL64, FAR_EL1
+#define TVMINF_AFSR0_EL1        PART_FULL32, AFSR0_EL1
+#define TVMINF_AFSR1_EL1        PART_FULL32, AFSR1_EL1
+#define TVMINF_MAIR_EL1         PART_FULL64, MAIR_EL1
+#define TVMINF_AMAIR_EL1        PART_FULL64, AMAIR_EL1
+#define TVMINF_CONTEXTIDR_EL1   PART_FULL32, CONTEXTIDR_EL1
+
+/*
+ * HCR_EL2.TVM trapped registers info for an AArch32 domain.
+ * Specifies the architecturally-mapped AArch64 counterpart register
+ * as well as the actual part of it the AArch32 register is mapped-to.
+ *
+ * ARMv8 (DDI 0487A.e): D1-1569 Table D1-34 (traps from AArch32 state)
+ */
+#define TVMINF_SCTLR            PART_FULL32, SCTLR_EL1
+#define TVMINF_TTBR0_64         PART_FULL64, TTBR0_EL1
+#define TVMINF_TTBR1_64         PART_FULL64, TTBR1_EL1
+#define TVMINF_TTBR0_32         PART_LOW, TTBR0_EL1
+#define TVMINF_TTBR1_32         PART_LOW, TTBR1_EL1
+#define TVMINF_TTBCR            PART_LOW, TCR_EL1
+#define TVMINF_DACR             PART_FULL32, DACR32_EL2
+#define TVMINF_DFSR             PART_FULL32, ESR_EL1
+#define TVMINF_IFSR             PART_FULL32, IFSR32_EL2
+#define TVMINF_DFAR             PART_LOW, FAR_EL1
+#define TVMINF_IFAR             PART_HIGH, FAR_EL1
+#define TVMINF_ADFSR            PART_FULL32, AFSR0_EL1
+#define TVMINF_AIFSR            PART_FULL32, AFSR1_EL1
+#define TVMINF_MAIR0            PART_LOW, MAIR_EL1      /* AKA PRRR */
+#define TVMINF_MAIR1            PART_HIGH, MAIR_EL1     /* AKA NMRR */
+#define TVMINF_AMAIR0           PART_LOW, AMAIR_EL1
+#define TVMINF_AMAIR1           PART_HIGH, AMAIR_EL1
+#define TVMINF_CONTEXTIDR       PART_FULL32, CONTEXTIDR_EL1
+
+#define TVM_EMUL_VARIANT(regs, hsr, val, variant, sz_or_lh, r)          \
+    TVM_EMUL_##variant(regs, hsr, val, sz_or_lh, r)
+#define TVM_EMUL_VARIANT_VMEVT(regs, hsr, val, vmevt_r, variant, sz_or_lh, r) \
+    TVM_EMUL_##variant##_VMEVT(regs, hsr, val, vmevt_r, sz_or_lh, r)
+
+/*
+ * Wrappers over TVM_EMUL_{SZ,LH}/TVM_EMUL_{SZ,LH}_VMEVT variants which use
+ * the TVMINF_* defs.
+ */
+#define TVM_EMUL(regs, hsr, val, r)                                     \
+    CALL_MACRO(TVM_EMUL_VARIANT, regs, hsr, val, TVMINF_##r)
+#define TVM_EMUL_VMEVT(regs, hsr, val, vmevt_r, r)                      \
+    CALL_MACRO(TVM_EMUL_VARIANT_VMEVT, regs, hsr, val, vmevt_r, TVMINF_##r)
+
+#elif defined(CONFIG_ARM_32)
+
+/*
+ * Emulation of system-register trapped writes that do not cause
+ * VM_EVENT_REASON_WRITE_CTRLREG monitor vm-events.
+ * Such writes are collaterally trapped due to setting HCR.TVM bit.
+ */
+#define TVM_EMUL_SZ(regs, hsr, val, sz, r...)                           \
+{                                                                       \
+    if ( psr_mode_is_user(regs) )                                       \
+        return inject_undef_exception(regs, hsr);                       \
+    WRITE_SYSREG##sz((uint##sz##_t) (val), r);                          \
+}
+
+/*
+ * Emulation of system-register writes that might cause
+ * VM_EVENT_REASON_WRITE_CTRLREG monitor vm-events.
+ */
+#define TVM_EMUL_SZ_VMEVT(regs, hsr, val, vmevt_r, sz, r...)            \
+{                                                                       \
+    unsigned long _old;                                                 \
+    if ( psr_mode_is_user(regs) )                                       \
+        return inject_undef_exception(regs, hsr);                       \
+    _old = (unsigned long) READ_SYSREG##sz(r);                          \
+    WRITE_SYSREG##sz((uint##sz##_t) (val), r);                          \
+    vm_event_monitor_cr(vmevt_r,                                        \
+                        (unsigned long) (uint##sz##_t) (val),           \
+                        _old);                                          \
+}
+
+/*
+ * HCR.TVM trapped registers info (size in bits and register to access)
+ *
+ * ARMv7 (DDI 0406C.b): B1.14.13
+ */
+#define TVMINF_SCTLR        32,SCTLR
+#define TVMINF_TTBR0_64     64,TTBR0
+#define TVMINF_TTBR1_64     64,TTBR1
+#define TVMINF_TTBR0_32     32,TTBR0_32
+#define TVMINF_TTBR1_32     32,TTBR1_32
+#define TVMINF_TTBCR        32,TTBCR
+#define TVMINF_DACR         32,DACR
+#define TVMINF_DFSR         32,DFSR
+#define TVMINF_IFSR         32,IFSR
+#define TVMINF_DFAR         32,DFAR
+#define TVMINF_IFAR         32,IFAR
+#define TVMINF_ADFSR        32,ADFSR
+#define TVMINF_AIFSR        32,AIFSR
+#define TVMINF_MAIR0        32,MAIR0    /* AKA PRRR */
+#define TVMINF_MAIR1        32,MAIR1    /* AKA NMRR */
+#define TVMINF_AMAIR0       32,AMAIR0
+#define TVMINF_AMAIR1       32,AMAIR1
+#define TVMINF_CONTEXTIDR   32,CONTEXTIDR
+
+/* Wrappers over TVM_EMUL_SZ/TVM_EMUL_SZ_VMEVT which use the TVMINF_* defs. */
+#define TVM_EMUL(regs, hsr, val, r...)                                  \
+    CALL_MACRO(TVM_EMUL_SZ, regs, hsr, val, TVMINF_##r)
+#define TVM_EMUL_VMEVT(regs, hsr, val, vmevt_r, r...)                   \
+    CALL_MACRO(TVM_EMUL_SZ_VMEVT, regs, hsr, val, vmevt_r, TVMINF_##r)
+
+#endif
+
+#endif /* __ASM_ARM_TRAPS_H__ */
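To make the CALL_MACRO / TVMINF_* indirection above concrete, here is one
wrapper expanded by hand (an illustration, not part of the header): on an
AArch64 hypervisor, the do_cp15_32() case for MAIR0 invokes
TVM_EMUL(regs, hsr, *r, MAIR0); TVMINF_MAIR0 expands to "PART_LOW, MAIR_EL1"
and PART_LOW to "LH,l", selecting the low-32-bit read-modify-write variant:

    /* Hand expansion of TVM_EMUL(regs, hsr, *r, MAIR0) on CONFIG_ARM_64,
     * i.e. TVM_EMUL_LH(regs, hsr, *r, l, MAIR_EL1): */
    {
        sysreg64_t _new;
        if ( psr_mode_is_user(regs) )         /* user-mode writes: undef */
            return inject_undef_exception(regs, hsr);
        _new.v = READ_SYSREG64(MAIR_EL1);     /* read full 64-bit register */
        _new.l = (uint32_t) (*r);             /* replace the low 32 bits */
        WRITE_SYSREG64(_new.v, MAIR_EL1);     /* write the result back */
    }

The _VMEVT variants additionally snapshot the old value and call
vm_event_monitor_cr() after performing the write.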
diff --git a/xen/include/asm-arm/vm_event.h b/xen/include/asm-arm/vm_event.h
index 014d9ba..a1b9f5b 100644
--- a/xen/include/asm-arm/vm_event.h
+++ b/xen/include/asm-arm/vm_event.h
@@ -33,6 +33,7 @@ int vm_event_init_domain(struct domain *d)
 
 static inline void vm_event_cleanup_domain(struct domain *d)
 {
+    memset(&d->arch.monitor, 0, sizeof(d->arch.monitor));
     memset(&d->monitor, 0, sizeof(d->monitor));
 }
 
@@ -63,7 +64,8 @@ static inline uint32_t vm_event_monitor_get_capabilities(struct domain *d)
 {
     uint32_t capabilities = 0;
 
-    capabilities = (1U << XEN_DOMCTL_MONITOR_EVENT_GUEST_REQUEST);
+    capabilities = (1U << XEN_DOMCTL_MONITOR_EVENT_WRITE_CTRLREG) |
+                   (1U << XEN_DOMCTL_MONITOR_EVENT_GUEST_REQUEST);
 
     return capabilities;
 }
diff --git a/xen/include/asm-x86/hvm/event.h b/xen/include/asm-x86/hvm/event.h
index 03f7fee..923d970 100644
--- a/xen/include/asm-x86/hvm/event.h
+++ b/xen/include/asm-x86/hvm/event.h
@@ -21,6 +21,7 @@
 
 #include <xen/sched.h>
 #include <xen/paging.h>
+#include <xen/vm_event.h>
 #include <public/vm_event.h>
 
 enum hvm_event_breakpoint_type
@@ -29,19 +30,13 @@ enum hvm_event_breakpoint_type
     HVM_EVENT_SINGLESTEP_BREAKPOINT,
 };
 
-/*
- * Called for current VCPU on crX/MSR changes by guest.
- * The event might not fire if the client has subscribed to it in onchangeonly
- * mode, hence the bool_t return type for control register write events.
- */
-bool_t hvm_event_cr(unsigned int index, unsigned long value,
-                    unsigned long old);
-#define hvm_event_crX(cr, new, old) \
-    hvm_event_cr(VM_EVENT_X86_##cr, new, old)
 void hvm_event_msr(unsigned int msr, uint64_t value);
 int hvm_event_breakpoint(unsigned long rip,
                          enum hvm_event_breakpoint_type type);
 
+#define hvm_event_crX(cr, new, old) \
+    vm_event_monitor_cr(VM_EVENT_X86_##cr, new, old)
+
 #endif /* __ASM_X86_HVM_EVENT_H__ */
 
 /*
diff --git a/xen/include/asm-x86/monitor.h b/xen/include/asm-x86/monitor.h
index 0954b59..4c0dc2e 100644
--- a/xen/include/asm-x86/monitor.h
+++ b/xen/include/asm-x86/monitor.h
@@ -27,8 +27,6 @@
 #include <asm/cpufeature.h>
 #include <asm/hvm/hvm.h>
 
-#define monitor_ctrlreg_bitmask(ctrlreg_index) (1U << (ctrlreg_index))
-
 static inline
 int arch_monitor_domctl_op(struct domain *d, struct xen_domctl_monitor_op *mop)
 {
diff --git a/xen/include/public/vm_event.h b/xen/include/public/vm_event.h
index 9270d52..864d72d 100644
--- a/xen/include/public/vm_event.h
+++ b/xen/include/public/vm_event.h
@@ -120,12 +120,18 @@
 /* An event has been requested via HVMOP_guest_request_vm_event. */
 #define VM_EVENT_REASON_GUEST_REQUEST            8
 
-/* Supported values for the vm_event_write_ctrlreg index. */
+/* Supported values for the vm_event_write_ctrlreg index (x86). */
 #define VM_EVENT_X86_CR0    0
 #define VM_EVENT_X86_CR3    1
 #define VM_EVENT_X86_CR4    2
 #define VM_EVENT_X86_XCR0   3
 
+/* Supported values for the vm_event_write_ctrlreg index (arm). */
+#define VM_EVENT_ARM_SCTLR  0   /* SCTLR_EL1 (aarch64), SCTLR (aarch32) */
+#define VM_EVENT_ARM_TTBR0  1   /* TTBR0_EL1 (aarch64), TTBR0 (aarch32) */
+#define VM_EVENT_ARM_TTBR1  2   /* TTBR1_EL1 (aarch64), TTBR1 (aarch32) */
+#define VM_EVENT_ARM_TTBCR  3   /* TCR_EL1 (aarch64), TTBCR (aarch32) */
+
 /*
  * Using a custom struct (not hvm_hw_cpu) so as to not fill
  * the vm_event ring buffer too quickly.
diff --git a/xen/include/xen/monitor.h b/xen/include/xen/monitor.h
index 7015e6d..dc5b9d7 100644
--- a/xen/include/xen/monitor.h
+++ b/xen/include/xen/monitor.h
@@ -25,6 +25,8 @@
 struct domain;
 struct xen_domctl_monitor_op;
 
+#define monitor_ctrlreg_bitmask(ctrlreg_index) (1U << (ctrlreg_index))
+
 int monitor_domctl(struct domain *d, struct xen_domctl_monitor_op *op);
 
 #endif /* __XEN_MONITOR_H__ */
diff --git a/xen/include/xen/vm_event.h b/xen/include/xen/vm_event.h
index beda9fe..3eb7e9a 100644
--- a/xen/include/xen/vm_event.h
+++ b/xen/include/xen/vm_event.h
@@ -81,6 +81,14 @@ void vm_event_vcpu_unpause(struct vcpu *v);
 int vm_event_monitor_traps(struct vcpu *v, uint8_t sync,
                            vm_event_request_t *req);
 
+/*
+ * Called for current VCPU on control-register changes by guest.
+ * The event might not fire if the client has subscribed to it in onchangeonly
+ * mode, hence the bool_t return type for control register write events.
+ */
+bool_t vm_event_monitor_cr(unsigned int index, unsigned long value,
+                           unsigned long old);
+
 void vm_event_monitor_guest_request(void);
 
 #endif /* __VM_EVENT_H__ */
-- 
2.5.0

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel