
[Xen-devel] [PATCH 7/7] vm-event/arm: implement support for control-register write vm-events



Add ARM support for control-register write monitoring through the vm-events
subsystem.

The ARM system control-registers that can be monitored are:
    - VM_EVENT_ARM_SCTLR:      AArch32 SCTLR, AArch64 SCTLR_EL1
    - VM_EVENT_ARM_TTBR{0,1}:  AArch32 TTBR{0,1}, AArch64 TTBR{0,1}_EL1
    - VM_EVENT_ARM_TTBCR:      AArch32 TTBCR, AArch64 TCR_EL1

Write operations to these registers are trapped by setting the
HCR_EL2.TVM (AArch64) / HCR.TVM (AArch32) bit.

Signed-off-by: Corneliu ZUZU <czuzu@xxxxxxxxxxxxxxx>
---
 MAINTAINERS                    |   1 +
 xen/arch/arm/Makefile          |   1 +
 xen/arch/arm/traps.c           | 126 +++++++++++++++++++-
 xen/arch/arm/vm_event.c        | 112 ++++++++++++++++++
 xen/common/monitor.c           |   2 -
 xen/common/vm_event.c          |   2 -
 xen/include/asm-arm/domain.h   |  30 +++++
 xen/include/asm-arm/traps.h    | 253 +++++++++++++++++++++++++++++++++++++++++
 xen/include/asm-arm/vm_event.h |  22 +++-
 xen/include/public/vm_event.h  |   8 +-
 xen/include/xen/monitor.h      |   2 -
 xen/include/xen/vm_event.h     |   2 -
 12 files changed, 543 insertions(+), 18 deletions(-)
 create mode 100644 xen/arch/arm/vm_event.c
 create mode 100644 xen/include/asm-arm/traps.h
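
Usage note: with this patch, a monitor application can subscribe to ARM
control-register writes the same way it already does on x86. A minimal
sketch, assuming the libxc monitor API as of this series
(xc_monitor_enable() / xc_monitor_write_ctrlreg(); exact signatures may
differ between Xen versions):

    #include <xenctrl.h>

    /* Illustrative only: enable synchronous, on-change-only TTBR0 events. */
    static int monitor_ttbr0(xc_interface *xch, domid_t domid)
    {
        uint32_t evtchn_port;
        void *ring_page = xc_monitor_enable(xch, domid, &evtchn_port);

        if ( !ring_page )
            return -1;

        /* VM_EVENT_ARM_TTBR0 is one of the indices introduced below. */
        return xc_monitor_write_ctrlreg(xch, domid, VM_EVENT_ARM_TTBR0,
                                        1 /* enable */, 1 /* sync */,
                                        1 /* onchangeonly */);
    }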

diff --git a/MAINTAINERS b/MAINTAINERS
index 9a224d4..634f359 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -402,6 +402,7 @@ M:  Tamas K Lengyel <tamas@xxxxxxxxxxxxx>
 S:     Supported
 F:     xen/common/mem_access.c
 F:     xen/*/vm_event.c
+F:     xen/arch/*/vm_event.c
 F:     xen/*/monitor.c
 F:     xen/include/*/mem_access.h
 F:     xen/include/*/monitor.h
diff --git a/xen/arch/arm/Makefile b/xen/arch/arm/Makefile
index 9e38da3..390df0a 100644
--- a/xen/arch/arm/Makefile
+++ b/xen/arch/arm/Makefile
@@ -40,6 +40,7 @@ obj-y += device.o
 obj-y += decode.o
 obj-y += processor.o
 obj-y += smc.o
+obj-y += vm_event.o
 obj-$(CONFIG_LIVEPATCH) += livepatch.o
 
 #obj-bin-y += ....o
diff --git a/xen/arch/arm/traps.c b/xen/arch/arm/traps.c
index 8c50685..af61ac3 100644
--- a/xen/arch/arm/traps.c
+++ b/xen/arch/arm/traps.c
@@ -43,6 +43,7 @@
 #include <asm/mmio.h>
 #include <asm/cpufeature.h>
 #include <asm/flushtlb.h>
+#include <asm/traps.h>
 
 #include "decode.h"
 #include "vtimer.h"
@@ -124,7 +125,12 @@ void init_traps(void)
     WRITE_SYSREG((HCPTR_CP_MASK & ~(HCPTR_CP(10) | HCPTR_CP(11))) | HCPTR_TTA,
                  CPTR_EL2);
 
-    /* Setup hypervisor traps */
+    /* Setup hypervisor traps
+     *
+     * Note: the HCR_TVM bit is also set for system-register write monitoring
+     * purposes (see vm_event_monitor_cr), but for performance reasons that is
+     * done selectively (see vcpu_enter_adjust_traps).
+     */
     WRITE_SYSREG(HCR_PTW|HCR_BSU_INNER|HCR_AMO|HCR_IMO|HCR_FMO|HCR_VM|
                  HCR_TWE|HCR_TWI|HCR_TSC|HCR_TAC|HCR_SWIO|HCR_TIDCP|HCR_FB,
                  HCR_EL2);
@@ -1720,6 +1726,7 @@ static void do_cp15_32(struct cpu_user_regs *regs,
     const struct hsr_cp32 cp32 = hsr.cp32;
     int regidx = cp32.reg;
     struct vcpu *v = current;
+    register_t r = get_user_reg(regs, regidx);
 
     if ( !check_conditional_instr(regs, hsr) )
     {
@@ -1730,6 +1737,61 @@ static void do_cp15_32(struct cpu_user_regs *regs,
     switch ( hsr.bits & HSR_CP32_REGS_MASK )
     {
     /*
+     * HCR_EL2.TVM / HCR.TVM
+     *
+     * ARMv7 (DDI 0406C.b): B1.14.13
+     * ARMv8 (DDI 0487A.e): D1-1569 Table D1-34
+     */
+    case HSR_CPREG32(SCTLR):
+        TVM_EMUL_VMEVT(v, regs, hsr, r, SCTLR);
+        break;
+    case HSR_CPREG32(TTBR0_32):
+        TVM_EMUL_VMEVT(v, regs, hsr, r, TTBR0_32);
+        break;
+    case HSR_CPREG32(TTBR1_32):
+        TVM_EMUL_VMEVT(v, regs, hsr, r, TTBR1_32);
+        break;
+    case HSR_CPREG32(TTBCR):
+        TVM_EMUL_VMEVT(v, regs, hsr, r, TTBCR);
+        break;
+    case HSR_CPREG32(DACR):
+        TVM_EMUL(regs, hsr, r, DACR);
+        break;
+    case HSR_CPREG32(DFSR):
+        TVM_EMUL(regs, hsr, r, DFSR);
+        break;
+    case HSR_CPREG32(IFSR):
+        TVM_EMUL(regs, hsr, r, IFSR);
+        break;
+    case HSR_CPREG32(DFAR):
+        TVM_EMUL(regs, hsr, r, DFAR);
+        break;
+    case HSR_CPREG32(IFAR):
+        TVM_EMUL(regs, hsr, r, IFAR);
+        break;
+    case HSR_CPREG32(ADFSR):
+        TVM_EMUL(regs, hsr, r, ADFSR);
+        break;
+    case HSR_CPREG32(AIFSR):
+        TVM_EMUL(regs, hsr, r, AIFSR);
+        break;
+    case HSR_CPREG32(MAIR0):
+        TVM_EMUL(regs, hsr, r, MAIR0);
+        break;
+    case HSR_CPREG32(MAIR1):
+        TVM_EMUL(regs, hsr, r, MAIR1);
+        break;
+    case HSR_CPREG32(AMAIR0):
+        TVM_EMUL(regs, hsr, r, AMAIR0);
+        break;
+    case HSR_CPREG32(AMAIR1):
+        TVM_EMUL(regs, hsr, r, AMAIR1);
+        break;
+    case HSR_CPREG32(CONTEXTIDR):
+        TVM_EMUL(regs, hsr, r, CONTEXTIDR);
+        break;
+
+    /*
      * !CNTHCTL_EL2.EL1PCEN / !CNTHCTL.PL1PCEN
      *
      * ARMv7 (DDI 0406C.b): B4.1.22
@@ -1853,6 +1915,13 @@ static void do_cp15_32(struct cpu_user_regs *regs,
 static void do_cp15_64(struct cpu_user_regs *regs,
                        const union hsr hsr)
 {
+    struct vcpu *v = current;
+    const struct hsr_cp64 cp64 = hsr.cp64;
+    sysreg64_t r = {
+        .low32 = (uint32_t) get_user_reg(regs, cp64.reg1),
+        .high32 = (uint32_t) get_user_reg(regs, cp64.reg2)
+    };
+
     if ( !check_conditional_instr(regs, hsr) )
     {
         advance_pc(regs, hsr);
@@ -1862,6 +1931,19 @@ static void do_cp15_64(struct cpu_user_regs *regs,
     switch ( hsr.bits & HSR_CP64_REGS_MASK )
     {
     /*
+     * HCR_EL2.TVM / HCR.TVM
+     *
+     * ARMv7 (DDI 0406C.b): B1.14.13
+     * ARMv8 (DDI 0487A.e): D1-1569 Table D1-34
+     */
+    case HSR_CPREG64(TTBR0):
+        TVM_EMUL_VMEVT(v, regs, hsr, r.val64, TTBR0_64);
+        break;
+    case HSR_CPREG64(TTBR1):
+        TVM_EMUL_VMEVT(v, regs, hsr, r.val64, TTBR1_64);
+        break;
+
+    /*
      * !CNTHCTL_EL2.EL1PCEN / !CNTHCTL.PL1PCEN
      *
      * ARMv7 (DDI 0406C.b): B4.1.22
@@ -1891,8 +1973,6 @@ static void do_cp15_64(struct cpu_user_regs *regs,
      */
     default:
         {
-            const struct hsr_cp64 cp64 = hsr.cp64;
-
             gdprintk(XENLOG_ERR,
                      "%s p15, %d, r%d, r%d, cr%d @ 0x%"PRIregister"\n",
                      cp64.read ? "mrrc" : "mcrr",
@@ -2128,10 +2208,50 @@ static void do_sysreg(struct cpu_user_regs *regs,
 {
     int regidx = hsr.sysreg.reg;
     struct vcpu *v = current;
+    register_t r = get_user_reg(regs, regidx);
 
     switch ( hsr.bits & HSR_SYSREG_REGS_MASK )
     {
     /*
+     * HCR_EL2.TVM
+     *
+     * ARMv8 (DDI 0487A.e): D1-1569 Table D1-34
+     */
+    case HSR_SYSREG_SCTLR_EL1:
+        TVM_EMUL_VMEVT(v, regs, hsr, r, SCTLR_EL1);
+        break;
+    case HSR_SYSREG_TTBR0_EL1:
+        TVM_EMUL_VMEVT(v, regs, hsr, r, TTBR0_EL1);
+        break;
+    case HSR_SYSREG_TTBR1_EL1:
+        TVM_EMUL_VMEVT(v, regs, hsr, r, TTBR1_EL1);
+        break;
+    case HSR_SYSREG_TCR_EL1:
+        TVM_EMUL_VMEVT(v, regs, hsr, r, TCR_EL1);
+        break;
+    case HSR_SYSREG_ESR_EL1:
+        TVM_EMUL(regs, hsr, r, ESR_EL1);
+        break;
+    case HSR_SYSREG_FAR_EL1:
+        TVM_EMUL(regs, hsr, r, FAR_EL1);
+        break;
+    case HSR_SYSREG_AFSR0_EL1:
+        TVM_EMUL(regs, hsr, r, AFSR0_EL1);
+        break;
+    case HSR_SYSREG_AFSR1_EL1:
+        TVM_EMUL(regs, hsr, r, AFSR1_EL1);
+        break;
+    case HSR_SYSREG_MAIR_EL1:
+        TVM_EMUL(regs, hsr, r, MAIR_EL1);
+        break;
+    case HSR_SYSREG_AMAIR_EL1:
+        TVM_EMUL(regs, hsr, r, AMAIR_EL1);
+        break;
+    case HSR_SYSREG_CONTEXTIDR_EL1:
+        TVM_EMUL(regs, hsr, r, CONTEXTIDR_EL1);
+        break;
+
+    /*
      * HCR_EL2.TACR
      *
      * ARMv8 (DDI 0487A.d): D7.2.1
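
For reference, a guest instruction such as "mcrr p15, 0, r0, r1, c2"
(a 64-bit TTBR0 write; r0 carries the low word, r1 the high word) now
traps into do_cp15_64() above, which reassembles the 64-bit value from
the two GPRs before dispatching. Illustrative flow, mirroring the hunk
above:

    sysreg64_t r = {
        .low32  = (uint32_t) get_user_reg(regs, cp64.reg1),  /* r0 */
        .high32 = (uint32_t) get_user_reg(regs, cp64.reg2)   /* r1 */
    };
    /* Defers the write and sends the vm-event (see asm-arm/traps.h). */
    TVM_EMUL_VMEVT(v, regs, hsr, r.val64, TTBR0_64);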
diff --git a/xen/arch/arm/vm_event.c b/xen/arch/arm/vm_event.c
new file mode 100644
index 0000000..3f23fec
--- /dev/null
+++ b/xen/arch/arm/vm_event.c
@@ -0,0 +1,112 @@
+/*
+ * arch/arm/vm_event.c
+ *
+ * Architecture-specific vm_event handling routines
+ *
+ * Copyright (c) 2016, Bitdefender S.R.L.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <xen/vm_event.h>
+#include <asm/traps.h>
+
+#if CONFIG_ARM_64
+
+#define MWSINF_SCTLR    32,SCTLR_EL1
+#define MWSINF_TTBR0    64,TTBR0_EL1
+#define MWSINF_TTBR1    64,TTBR1_EL1
+#define MWSINF_TTBCR    64,TCR_EL1
+
+#elif CONFIG_ARM_32
+
+#define MWSINF_SCTLR    32,SCTLR
+#define MWSINF_TTBR0    64,TTBR0
+#define MWSINF_TTBR1    64,TTBR1
+#define MWSINF_TTBR0_32 32,TTBR0_32
+#define MWSINF_TTBR1_32 32,TTBR1_32
+#define MWSINF_TTBCR    32,TTBCR
+
+#endif
+
+#define MWS_EMUL_(val, sz, r...)    WRITE_SYSREG##sz((uint##sz##_t) (val), r)
+#define MWS_EMUL(r)                 CALL_MACRO(MWS_EMUL_, w->value, MWSINF_##r)
+
+static inline void vcpu_enter_write_data(struct vcpu *v)
+{
+    struct monitor_write_data *w = &v->arch.vm_event.write_data;
+
+    if ( likely(MWS_NOWRITE == w->status) )
+        return;
+
+    switch ( w->status )
+    {
+    case MWS_SCTLR:
+        MWS_EMUL(SCTLR);
+        break;
+    case MWS_TTBR0:
+        MWS_EMUL(TTBR0);
+        break;
+    case MWS_TTBR1:
+        MWS_EMUL(TTBR1);
+        break;
+#if CONFIG_ARM_32
+    case MWS_TTBR0_32:
+        MWS_EMUL(TTBR0_32);
+        break;
+    case MWS_TTBR1_32:
+        MWS_EMUL(TTBR1_32);
+        break;
+#endif
+    case MWS_TTBCR:
+        MWS_EMUL(TTBCR);
+        break;
+    default:
+        break;
+    }
+
+    w->status = MWS_NOWRITE;
+}
+
+static inline void vcpu_enter_adjust_traps(struct vcpu *v)
+{
+    register_t old_hcr, hcr;
+
+    hcr = (old_hcr = READ_SYSREG(HCR_EL2));
+
+    if ( unlikely(0 != v->domain->arch.monitor.write_ctrlreg_enabled) )
+        hcr |= HCR_TVM;
+    else
+        hcr &= ~HCR_TVM;
+
+    if ( unlikely(hcr != old_hcr) )
+    {
+        WRITE_SYSREG(hcr, HCR_EL2);
+        isb();
+    }
+}
+
+void arch_vm_event_vcpu_enter(struct vcpu *v)
+{
+    vcpu_enter_write_data(v);
+    vcpu_enter_adjust_traps(v);
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
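
To clarify the CALL_MACRO machinery above: the deferred guest write is
only committed to the real register when the vCPU re-enters the guest,
i.e. after the monitor has had the opportunity to veto it with
VM_EVENT_FLAG_DENY. On an AArch64 hypervisor, for example,
MWS_EMUL(TTBR0) resolves through MWSINF_TTBR0 (= 64,TTBR0_EL1) and
expands, roughly, to:

    WRITE_SYSREG64((uint64_t) w->value, TTBR0_EL1);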
diff --git a/xen/common/monitor.c b/xen/common/monitor.c
index 2366bae..c35a717 100644
--- a/xen/common/monitor.c
+++ b/xen/common/monitor.c
@@ -62,7 +62,6 @@ int monitor_domctl(struct domain *d, struct xen_domctl_monitor_op *mop)
 
     switch ( mop->event )
     {
-#if CONFIG_X86
     case XEN_DOMCTL_MONITOR_EVENT_WRITE_CTRLREG:
     {
         struct arch_domain *ad = &d->arch;
@@ -100,7 +99,6 @@ int monitor_domctl(struct domain *d, struct xen_domctl_monitor_op *mop)
 
         break;
     }
-#endif
 
     case XEN_DOMCTL_MONITOR_EVENT_GUEST_REQUEST:
     {
diff --git a/xen/common/vm_event.c b/xen/common/vm_event.c
index 53dc048..e0f999e 100644
--- a/xen/common/vm_event.c
+++ b/xen/common/vm_event.c
@@ -824,7 +824,6 @@ int vm_event_monitor_traps(struct vcpu *v, uint8_t sync,
     return 1;
 }
 
-#if CONFIG_X86
 bool_t vm_event_monitor_cr(unsigned int index, unsigned long value,
                            unsigned long old)
 {
@@ -852,7 +851,6 @@ bool_t vm_event_monitor_cr(unsigned int index, unsigned long value,
 
     return 0;
 }
-#endif
 
 void vm_event_monitor_guest_request(void)
 {
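
Note that vm_event_monitor_cr() is now arch-agnostic; on ARM it fills in
the same request layout as on x86. A sketch of the request the common
code builds (field names per public/vm_event.h; vcpu_id etc. are filled
in by vm_event_monitor_traps()):

    vm_event_request_t req = {
        .reason = VM_EVENT_REASON_WRITE_CTRLREG,
        .u.write_ctrlreg.index     = index,   /* e.g. VM_EVENT_ARM_TTBR0 */
        .u.write_ctrlreg.new_value = value,
        .u.write_ctrlreg.old_value = old,
    };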
diff --git a/xen/include/asm-arm/domain.h b/xen/include/asm-arm/domain.h
index 370cdeb..83f0952 100644
--- a/xen/include/asm-arm/domain.h
+++ b/xen/include/asm-arm/domain.h
@@ -40,6 +40,26 @@ struct vtimer {
         uint64_t cval;
 };
 
+enum monitor_write_status
+{
+    MWS_NOWRITE = 0,
+    MWS_SCTLR,
+    MWS_TTBR0,
+    MWS_TTBR1,
+    MWS_TTBR0_32,
+    MWS_TTBR1_32,
+    MWS_TTBCR,
+};
+
+struct monitor_write_data {
+    enum monitor_write_status status;
+    uint64_t value;
+};
+
+struct arch_vm_event {
+    struct monitor_write_data write_data;
+};
+
 struct arch_domain
 {
 #ifdef CONFIG_ARM_64
@@ -127,6 +147,14 @@ struct arch_domain
     paddr_t efi_acpi_gpa;
     paddr_t efi_acpi_len;
 #endif
+
+    /* Arch-specific monitor options */
+    struct {
+        unsigned int write_ctrlreg_enabled       : 4;
+        unsigned int write_ctrlreg_sync          : 4;
+        unsigned int write_ctrlreg_onchangeonly  : 4;
+    } monitor;
+
 }  __cacheline_aligned;
 
 struct arch_vcpu
@@ -258,6 +286,8 @@ struct arch_vcpu
     struct vtimer phys_timer;
     struct vtimer virt_timer;
     bool_t vtimer_initialized;
+
+    struct arch_vm_event vm_event;
 }  __cacheline_aligned;
 
 void vcpu_show_execution_state(struct vcpu *);
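
The three 4-bit monitor fields above are bitmasks with one bit per
VM_EVENT_ARM_* index, mirroring the x86 arch_domain layout. An
illustrative test (a sketch only, using monitor_ctrlreg_bitmask() from
xen/monitor.h):

    /* Is TTBR0 write monitoring enabled for this domain? */
    static inline bool_t ttbr0_monitored(const struct arch_domain *ad)
    {
        return !!(ad->monitor.write_ctrlreg_enabled &
                  monitor_ctrlreg_bitmask(VM_EVENT_ARM_TTBR0));
    }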
diff --git a/xen/include/asm-arm/traps.h b/xen/include/asm-arm/traps.h
new file mode 100644
index 0000000..9e246a7
--- /dev/null
+++ b/xen/include/asm-arm/traps.h
@@ -0,0 +1,253 @@
+/*
+ * include/asm-arm/traps.h
+ *
+ * ARM Trap handlers
+ *
+ * Copyright (c) 2016, Bitdefender S.R.L.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ASM_ARM_TRAPS_H__
+#define __ASM_ARM_TRAPS_H__
+
+#include <xen/vm_event.h>
+#include <asm/regs.h>
+#include <public/vm_event.h>
+
+/* used to force expansion of args before calling macro */
+#define CALL_MACRO(macro, args...)      macro(args)
+
+/* used for easy manipulation of low/high 32-bits of 64-bit system registers */
+typedef union {
+    uint64_t val64;
+    struct {
+        uint32_t low32;
+        uint32_t high32;
+    };
+} sysreg64_t;
+
+#if CONFIG_ARM_64
+
+/*
+ * Emulation of system-register trapped writes that do not cause
+ * VM_EVENT_REASON_WRITE_CTRLREG monitor vm-events.
+ * Such writes are collaterally trapped due to setting the HCR_EL2.TVM bit.
+ *
+ * Regarding aarch32 domains, note that from Xen's perspective system-registers
+ * of such domains are architecturally-mapped to aarch64 registers in one of
+ * three ways:
+ *  - low 32-bits mapping   (e.g. aarch32 DFAR -> aarch64 FAR_EL1[31:0])
+ *  - high 32-bits mapping  (e.g. aarch32 IFAR -> aarch64 FAR_EL1[63:32])
+ *  - full mapping          (e.g. aarch32 SCTLR -> aarch64 SCTLR_EL1)
+ *
+ * Hence we define 2 macro variants:
+ *  - TVM_EMUL_SZ variant, for full mappings
+ *  - TVM_EMUL_LH variant, for low/high 32-bits mappings
+ */
+#define TVM_EMUL_SZ(regs, hsr, val, sz, r...)                           \
+{                                                                       \
+    if ( psr_mode_is_user(regs) )                                       \
+        return inject_undef_exception(regs, hsr);                       \
+    WRITE_SYSREG##sz((uint##sz##_t) (val), r);                          \
+}
+#define TVM_EMUL_LH(regs, hsr, val, l_or_h, r...)                       \
+{                                                                       \
+    sysreg64_t _new;                                                    \
+    if ( psr_mode_is_user(regs) )                                       \
+        return inject_undef_exception(regs, hsr);                       \
+    _new.val64 = READ_SYSREG64(r);                                      \
+    _new.l_or_h = (uint32_t) (val);                                     \
+    WRITE_SYSREG64(_new.val64, r);                                      \
+}
+
+/*
+ * Emulation of system-register writes that cause VM_EVENT_REASON_WRITE_CTRLREG
+ * monitor vm-events.
+ * SZ/LH variants, reasoning is the same as above.
+ */
+#define TVM_EMUL_SZ_VMEVT(v, regs, hsr, val, cridx, mws, sz, r)         \
+{                                                                       \
+    unsigned long _old;                                                 \
+    if ( psr_mode_is_user(regs) )                                       \
+        return inject_undef_exception(regs, hsr);                       \
+    ASSERT(MWS_NOWRITE == (v)->arch.vm_event.write_data.status);        \
+    (v)->arch.vm_event.write_data.status = mws;                         \
+    (v)->arch.vm_event.write_data.value = (uint##sz##_t) (val);         \
+    _old = READ_SYSREG##sz(r);                                          \
+    vm_event_monitor_cr(cridx,                                          \
+                        (uint##sz##_t) (val),                           \
+                        _old);                                          \
+}
+#define TVM_EMUL_LH_VMEVT(v, regs, hsr, val, cridx, mws, l_or_h, r)     \
+{                                                                       \
+    sysreg64_t _old, _new;                                              \
+    if ( psr_mode_is_user(regs) )                                       \
+        return inject_undef_exception(regs, hsr);                       \
+    _new.val64 = (_old.val64 = READ_SYSREG64(r));                       \
+    _new.l_or_h = (uint32_t) (val);                                     \
+    ASSERT(MWS_NOWRITE == (v)->arch.vm_event.write_data.status);        \
+    (v)->arch.vm_event.write_data.status = mws;                         \
+    (v)->arch.vm_event.write_data.value = _new.val64;                   \
+    vm_event_monitor_cr(cridx,                                          \
+                        _new.val64,                                     \
+                        _old.val64);                                    \
+}
+
+#define PART_FULL32     SZ,32       /* SZ variant, 32-bit counterpart */
+#define PART_FULL64     SZ,64       /* SZ variant, 64-bit counterpart */
+#define PART_LOW        LH,low32    /* LH variant, low 32-bits */
+#define PART_HIGH       LH,high32   /* LH variant, high 32-bits */
+
+/*
+ * HCR_EL2.TVM trapped registers info (size in bits) for an aarch64 domain.
+ *
+ * ARMv8 (DDI 0487A.e): D1-1569 Table D1-34 (traps from AArch64 state)
+ */
+#define TVMINF_ESR_EL1          PART_FULL32,    ESR_EL1
+#define TVMINF_FAR_EL1          PART_FULL64,    FAR_EL1
+#define TVMINF_AFSR0_EL1        PART_FULL32,    AFSR0_EL1
+#define TVMINF_AFSR1_EL1        PART_FULL32,    AFSR1_EL1
+#define TVMINF_MAIR_EL1         PART_FULL64,    MAIR_EL1
+#define TVMINF_AMAIR_EL1        PART_FULL64,    AMAIR_EL1
+#define TVMINF_CONTEXTIDR_EL1   PART_FULL32,    CONTEXTIDR_EL1
+
+#define TVMINF_VMEVT_SCTLR_EL1  VM_EVENT_ARM_SCTLR, MWS_SCTLR, \
+                                PART_FULL32,    SCTLR_EL1
+#define TVMINF_VMEVT_TTBR0_EL1  VM_EVENT_ARM_TTBR0, MWS_TTBR0, \
+                                PART_FULL64,    TTBR0_EL1
+#define TVMINF_VMEVT_TTBR1_EL1  VM_EVENT_ARM_TTBR1, MWS_TTBR1, \
+                                PART_FULL64,    TTBR1_EL1
+#define TVMINF_VMEVT_TCR_EL1    VM_EVENT_ARM_TTBCR, MWS_TTBCR, \
+                                PART_FULL64,    TCR_EL1
+
+/*
+ * HCR_EL2.TVM trapped registers info for an aarch32 domain.
+ * Specifies the architecturally-mapped aarch64 counterpart register
+ * as well as the actual part the aarch32 register is mapped-to
+ * (full register/low 32-bits/high 32-bits).
+ *
+ * ARMv8 (DDI 0487A.e): D1-1569 Table D1-34 (traps from aarch32 state)
+ */
+#define TVMINF_DACR             PART_FULL32,    DACR32_EL2
+#define TVMINF_DFSR             PART_FULL32,    ESR_EL1
+#define TVMINF_IFSR             PART_FULL32,    IFSR32_EL2
+#define TVMINF_DFAR             PART_LOW,       FAR_EL1
+#define TVMINF_IFAR             PART_HIGH,      FAR_EL1
+#define TVMINF_ADFSR            PART_FULL32,    AFSR0_EL1
+#define TVMINF_AIFSR            PART_FULL32,    AFSR1_EL1
+#define TVMINF_MAIR0            PART_LOW,       MAIR_EL1        /* AKA PRRR */
+#define TVMINF_MAIR1            PART_HIGH,      MAIR_EL1        /* AKA NMRR */
+#define TVMINF_AMAIR0           PART_LOW,       AMAIR_EL1
+#define TVMINF_AMAIR1           PART_HIGH,      AMAIR_EL1
+#define TVMINF_CONTEXTIDR       PART_FULL32,    CONTEXTIDR_EL1
+
+#define TVMINF_VMEVT_SCTLR      VM_EVENT_ARM_SCTLR, MWS_SCTLR, \
+                                PART_FULL32,    SCTLR_EL1
+#define TVMINF_VMEVT_TTBR0_64   VM_EVENT_ARM_TTBR0, MWS_TTBR0, \
+                                PART_FULL64,    TTBR0_EL1
+#define TVMINF_VMEVT_TTBR1_64   VM_EVENT_ARM_TTBR1, MWS_TTBR1, \
+                                PART_FULL64,    TTBR1_EL1
+#define TVMINF_VMEVT_TTBR0_32   VM_EVENT_ARM_TTBR0, MWS_TTBR0, \
+                                PART_LOW,       TTBR0_EL1
+#define TVMINF_VMEVT_TTBR1_32   VM_EVENT_ARM_TTBR1, MWS_TTBR1, \
+                                PART_LOW,       TTBR1_EL1
+#define TVMINF_VMEVT_TTBCR      VM_EVENT_ARM_TTBCR, MWS_TTBCR, \
+                                PART_LOW,       TCR_EL1
+
+/*
+ * Wrappers over TVM_EMUL_{SZ,LH}/TVM_EMUL_{SZ,LH}_VMEVT variants which use
+ * the TVMINF_* defs.
+ */
+#define TVM_EMUL_VAR(regs, hsr, val, var, sz_or_lh, r...) \
+        TVM_EMUL_##var(regs, hsr, val, sz_or_lh, r)
+#define TVM_EMUL_VAR_VMEVT(v, regs, hsr, val, cridx, mws, var, sz_or_lh, r...) \
+        TVM_EMUL_##var##_VMEVT(v, regs, hsr, val, cridx, mws, sz_or_lh, r)
+#define TVM_EMUL(regs, hsr, val, r) \
+        CALL_MACRO(TVM_EMUL_VAR, regs, hsr, val, TVMINF_##r)
+#define TVM_EMUL_VMEVT(v, regs, hsr, val, r) \
+        CALL_MACRO(TVM_EMUL_VAR_VMEVT, v, regs, hsr, val, TVMINF_VMEVT_##r)
+
+#elif CONFIG_ARM_32
+
+/*
+ * Emulation of system-register trapped writes that do not cause
+ * VM_EVENT_REASON_WRITE_CTRLREG monitor vm-events.
+ * Such writes are collaterally trapped due to setting the HCR.TVM bit.
+ */
+#define TVM_EMUL_SZ(regs, hsr, val, sz, r...)                       \
+{                                                                   \
+    if ( psr_mode_is_user(regs) )                                   \
+        return inject_undef_exception(regs, hsr);                   \
+    WRITE_SYSREG##sz((uint##sz##_t) (val), r);                      \
+}
+
+/*
+ * Emulation of system-register writes that cause VM_EVENT_REASON_WRITE_CTRLREG
+ * monitor vm-events.
+ */
+#define TVM_EMUL_SZ_VMEVT(v, regs, hsr, val, cridx, mws, sz, r...)  \
+{                                                                   \
+    unsigned long _old;                                             \
+    if ( psr_mode_is_user(regs) )                                   \
+        return inject_undef_exception(regs, hsr);                   \
+    ASSERT(MWS_NOWRITE == (v)->arch.vm_event.write_data.status);    \
+    (v)->arch.vm_event.write_data.status = mws;                     \
+    (v)->arch.vm_event.write_data.value = (uint##sz##_t) (val);     \
+    _old = READ_SYSREG##sz(r);                                      \
+    vm_event_monitor_cr(cridx,                                      \
+                        (uint##sz##_t) (val),                       \
+                        _old);                                      \
+}
+
+/*
+ * HCR.TVM trapped registers info (size in bits).
+ *
+ * ARMv7 (DDI 0406C.b): B1.14.13
+ */
+#define TVMINF_DACR             32,DACR
+#define TVMINF_DFSR             32,DFSR
+#define TVMINF_IFSR             32,IFSR
+#define TVMINF_DFAR             32,DFAR
+#define TVMINF_IFAR             32,IFAR
+#define TVMINF_ADFSR            32,ADFSR
+#define TVMINF_AIFSR            32,AIFSR
+#define TVMINF_MAIR0            32,MAIR0        /* AKA PRRR */
+#define TVMINF_MAIR1            32,MAIR1        /* AKA NMRR */
+#define TVMINF_AMAIR0           32,AMAIR0
+#define TVMINF_AMAIR1           32,AMAIR1
+#define TVMINF_CONTEXTIDR       32,CONTEXTIDR
+
+#define TVMINF_VMEVT_SCTLR      VM_EVENT_ARM_SCTLR, MWS_SCTLR, \
+                                32,SCTLR
+#define TVMINF_VMEVT_TTBR0_64   VM_EVENT_ARM_TTBR0, MWS_TTBR0, \
+                                64,TTBR0
+#define TVMINF_VMEVT_TTBR1_64   VM_EVENT_ARM_TTBR1, MWS_TTBR1, \
+                                64,TTBR1
+#define TVMINF_VMEVT_TTBR0_32   VM_EVENT_ARM_TTBR0, MWS_TTBR0_32, \
+                                32,TTBR0_32
+#define TVMINF_VMEVT_TTBR1_32   VM_EVENT_ARM_TTBR1, MWS_TTBR1_32, \
+                                32,TTBR1_32
+#define TVMINF_VMEVT_TTBCR      VM_EVENT_ARM_TTBCR, MWS_TTBCR, \
+                                32,TTBCR
+
+/* Wrappers over TVM_EMUL_SZ/TVM_EMUL_SZ_VMEVT which use the TVMINF_* defs. */
+#define TVM_EMUL(regs, hsr, val, r) \
+        CALL_MACRO(TVM_EMUL_SZ, regs, hsr, val, TVMINF_##r)
+#define TVM_EMUL_VMEVT(v, regs, hsr, val, r) \
+        CALL_MACRO(TVM_EMUL_SZ_VMEVT, v, regs, hsr, val, TVMINF_VMEVT_##r)
+
+#endif
+
+#endif /* __ASM_ARM_TRAPS_H__ */
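
To make the macro layering above concrete: on AArch64,
TVM_EMUL(regs, hsr, r, DFAR) resolves via TVMINF_DFAR (= PART_LOW, FAR_EL1)
to the TVM_EMUL_LH variant, i.e. roughly (illustrative expansion):

    {
        sysreg64_t _new;
        if ( psr_mode_is_user(regs) )
            return inject_undef_exception(regs, hsr);
        _new.val64 = READ_SYSREG64(FAR_EL1); /* aarch32 DFAR = FAR_EL1[31:0] */
        _new.low32 = (uint32_t) r;
        WRITE_SYSREG64(_new.val64, FAR_EL1);
    }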
diff --git a/xen/include/asm-arm/vm_event.h b/xen/include/asm-arm/vm_event.h
index 4e5a272..edf9654 100644
--- a/xen/include/asm-arm/vm_event.h
+++ b/xen/include/asm-arm/vm_event.h
@@ -30,6 +30,12 @@ static inline int vm_event_init_domain(struct domain *d)
 
 static inline void vm_event_cleanup_domain(struct domain *d)
 {
+    struct vcpu *v;
+
+    for_each_vcpu ( d, v )
+        memset(&v->arch.vm_event, 0, sizeof(v->arch.vm_event));
+
+    memset(&d->arch.monitor, 0, sizeof(d->arch.monitor));
     memset(&d->monitor, 0, sizeof(d->monitor));
 }
 
@@ -41,7 +47,13 @@ static inline void vm_event_toggle_singlestep(struct domain *d, struct vcpu *v)
 static inline
 void vm_event_register_write_resume(struct vcpu *v, vm_event_response_t *rsp)
 {
-    /* Not supported on ARM. */
+    /* X86 VM_EVENT_REASON_MOV_TO_MSR could (but shouldn't) end-up here too. */
+    if ( unlikely(VM_EVENT_REASON_WRITE_CTRLREG != rsp->reason) )
+        return;
+
+    if ( (rsp->flags & VM_EVENT_FLAG_DENY) &&
+         (rsp->flags & VM_EVENT_FLAG_VCPU_PAUSED) )
+        v->arch.vm_event.write_data.status = MWS_NOWRITE;
 }
 
 static inline
@@ -55,10 +67,7 @@ static inline void vm_event_fill_regs(vm_event_request_t *req)
     /* Not supported on ARM. */
 }
 
-static inline void arch_vm_event_vcpu_enter(struct vcpu *v)
-{
-    /* Nothing to do. */
-}
+void arch_vm_event_vcpu_enter(struct vcpu *v);
 
 /*
  * Monitor vm-events.
@@ -67,7 +76,8 @@ static inline uint32_t vm_event_monitor_get_capabilities(struct domain *d)
 {
     uint32_t capabilities = 0;
 
-    capabilities = (1U << XEN_DOMCTL_MONITOR_EVENT_GUEST_REQUEST);
+    capabilities = (1U << XEN_DOMCTL_MONITOR_EVENT_WRITE_CTRLREG) |
+                   (1U << XEN_DOMCTL_MONITOR_EVENT_GUEST_REQUEST);
 
     return capabilities;
 }
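
For completeness, the deny path pairs with vm_event_register_write_resume()
above: a monitor that received a synchronous WRITE_CTRLREG event can cancel
the pending register write by setting VM_EVENT_FLAG_DENY in its response.
A sketch of the monitor-side response (ring-handling details omitted):

    vm_event_response_t rsp = {
        .version = VM_EVENT_INTERFACE_VERSION,
        .vcpu_id = req.vcpu_id,
        .reason  = req.reason,  /* VM_EVENT_REASON_WRITE_CTRLREG */
        /* VM_EVENT_FLAG_VCPU_PAUSED must be set, i.e. the event was sync. */
        .flags   = req.flags | VM_EVENT_FLAG_DENY,
    };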
diff --git a/xen/include/public/vm_event.h b/xen/include/public/vm_event.h
index 8f94e20..ec3eaca 100644
--- a/xen/include/public/vm_event.h
+++ b/xen/include/public/vm_event.h
@@ -121,12 +121,18 @@
 /* An event has been requested via HVMOP_guest_request_vm_event. */
 #define VM_EVENT_REASON_GUEST_REQUEST           8
 
-/* Supported values for the vm_event_write_ctrlreg index. */
+/* Supported values for the vm_event_write_ctrlreg index (x86). */
 #define VM_EVENT_X86_CR0    0
 #define VM_EVENT_X86_CR3    1
 #define VM_EVENT_X86_CR4    2
 #define VM_EVENT_X86_XCR0   3
 
+/* Supported values for the vm_event_write_ctrlreg index (arm). */
+#define VM_EVENT_ARM_SCTLR  0       /* SCTLR_EL1 (aarch64), SCTLR (aarch32) */
+#define VM_EVENT_ARM_TTBR0  1       /* TTBR0_EL1 (aarch64), TTBR0 (aarch32) */
+#define VM_EVENT_ARM_TTBR1  2       /* TTBR1_EL1 (aarch64), TTBR1 (aarch32) */
+#define VM_EVENT_ARM_TTBCR  3       /* TCR_EL1   (aarch64), TTBCR (aarch32) */
+
 /*
  * Using a custom struct (not hvm_hw_cpu) so as to not fill
  * the vm_event ring buffer too quickly.
diff --git a/xen/include/xen/monitor.h b/xen/include/xen/monitor.h
index 422fd93..dc5b9d7 100644
--- a/xen/include/xen/monitor.h
+++ b/xen/include/xen/monitor.h
@@ -25,9 +25,7 @@
 struct domain;
 struct xen_domctl_monitor_op;
 
-#if CONFIG_X86
 #define monitor_ctrlreg_bitmask(ctrlreg_index) (1U << (ctrlreg_index))
-#endif
 
 int monitor_domctl(struct domain *d, struct xen_domctl_monitor_op *op);
 
diff --git a/xen/include/xen/vm_event.h b/xen/include/xen/vm_event.h
index 71ae84a..8b1acd0 100644
--- a/xen/include/xen/vm_event.h
+++ b/xen/include/xen/vm_event.h
@@ -96,7 +96,6 @@ void vm_event_vcpu_unpause(struct vcpu *v);
 int vm_event_monitor_traps(struct vcpu *v, uint8_t sync,
                            vm_event_request_t *req);
 
-#if CONFIG_X86
 /*
  * Called for the current vCPU on control-register changes by guest.
  * The event might not fire if the client has subscribed to it in onchangeonly
@@ -104,7 +103,6 @@ int vm_event_monitor_traps(struct vcpu *v, uint8_t sync,
  */
 bool_t vm_event_monitor_cr(unsigned int index, unsigned long value,
                            unsigned long old);
-#endif
 
 void vm_event_monitor_guest_request(void);
 
-- 
2.5.0

