[Xen-devel] [PATCH V2 1/3] xen/vm_access: Support for memory-content hiding
This patch adds support for memory-content hiding by modifying the
value returned by emulated instructions that read certain memory
addresses containing sensitive data. The patch only applies to
cases where MEM_ACCESS_EMULATE or MEM_ACCESS_EMULATE_NOWRITE has
been set in the vm_event response.
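From the monitoring application's side, the intended use is to set the
new response flag together with one of the emulation flags and fill in
the replacement bytes. A minimal sketch against the public vm_event
interface added below (the surrounding event loop and response setup,
as in tools/tests/xen-access/xen-access.c, are assumed; hide_bytes and
its arguments are illustrative names only; needs <string.h>):

    /* Answer a mem_access event with replacement data for emulated reads. */
    static void hide_bytes(vm_event_response_t *rsp,
                           const uint8_t *bytes, uint32_t len)
    {
        rsp->flags |= MEM_ACCESS_EMULATE | MEM_ACCESS_SET_EMUL_READ_DATA;

        /* Clamp to the payload buffer; the hypervisor clamps again anyway. */
        if ( len > sizeof(rsp->data.emul_read_data.data) )
            len = sizeof(rsp->data.emul_read_data.data);

        rsp->data.emul_read_data.size = len;
        memcpy(rsp->data.emul_read_data.data, bytes, len);
    }

The hypervisor then emulates the faulting instruction, and reads
performed during that emulation return the supplied bytes instead of
the actual memory contents.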
Signed-off-by: Razvan Cojocaru <rcojocaru@xxxxxxxxxxxxxxx>
---
Changes since V1:
- Using min() and max() where applicable.
- Fixed memcpy() length in hvmemul_read_set_context().
- "struct vcpu* curr" became "struct vcpu *curr".
- Put "NORMAL" as the first case in the switch() in
hvm_mem_access_emulate_one().
- When set_context is true, overwrite a safe chunk of the buffer
instead of completely ignoring *reps and bytes in
_hvmemul_rep_movs().
- Added a comment to explain vm_event_emul_read_data.data's size
and updated the header to no longer use a magic constant for the
size.
- A safe portion of the buffer is now overwritten with the provided
bytes (no more lost bytes or ignored reps count).
- Added handlers for hvmemul_cmpxchg(), hvmemul_read_io() and
hvmemul_rep_outs().
- Named the regs / emul_read_data union.
- Modified xen-access.c to compile after naming the regs /
emul_read_data union.
---
tools/tests/xen-access/xen-access.c | 2 +-
xen/arch/x86/domain.c | 1 +
xen/arch/x86/hvm/emulate.c | 218 +++++++++++++++++++++++++++++++++--
xen/arch/x86/hvm/event.c | 50 ++++----
xen/arch/x86/mm/p2m.c | 90 ++++++++-------
xen/common/vm_event.c | 23 ++++
xen/include/asm-x86/domain.h | 2 +
xen/include/asm-x86/hvm/emulate.h | 10 +-
xen/include/public/vm_event.h | 21 +++-
9 files changed, 339 insertions(+), 78 deletions(-)
diff --git a/tools/tests/xen-access/xen-access.c b/tools/tests/xen-access/xen-access.c
index 12ab921..e6ca9ba 100644
--- a/tools/tests/xen-access/xen-access.c
+++ b/tools/tests/xen-access/xen-access.c
@@ -530,7 +530,7 @@ int main(int argc, char *argv[])
break;
case VM_EVENT_REASON_SOFTWARE_BREAKPOINT:
printf("Breakpoint: rip=%016"PRIx64", gfn=%"PRIx64" (vcpu
%d)\n",
- req.regs.x86.rip,
+ req.data.regs.x86.rip,
req.u.software_breakpoint.gfn,
req.vcpu_id);
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index db073a6..95eb190 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -269,6 +269,7 @@ struct vcpu *alloc_vcpu_struct(void)
void free_vcpu_struct(struct vcpu *v)
{
+ xfree(v->arch.vm_event.emul_read_data);
free_xenheap_page(v);
}
diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index ac9c9d6..2f7081d 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -578,6 +578,28 @@ static int hvmemul_read(
container_of(ctxt, struct hvm_emulate_ctxt, ctxt));
}
+static int hvmemul_read_set_context(
+ enum x86_segment seg,
+ unsigned long offset,
+ void *p_data,
+ unsigned int bytes,
+ struct x86_emulate_ctxt *ctxt)
+{
+ struct vcpu *curr = current;
+ unsigned int len;
+
+ if ( !curr->arch.vm_event.emul_read_data )
+ return X86EMUL_UNHANDLEABLE;
+
+ len = min_t(unsigned int,
+ bytes, curr->arch.vm_event.emul_read_data->size);
+
+ if ( len )
+ memcpy(p_data, curr->arch.vm_event.emul_read_data->data, len);
+
+ return X86EMUL_OKAY;
+}
+
static int hvmemul_insn_fetch(
enum x86_segment seg,
unsigned long offset,
@@ -815,6 +837,27 @@ static int hvmemul_cmpxchg(
return hvmemul_write(seg, offset, p_new, bytes, ctxt);
}
+static int hvmemul_cmpxchg_set_context(
+ enum x86_segment seg,
+ unsigned long offset,
+ void *p_old,
+ void *p_new,
+ unsigned int bytes,
+ struct x86_emulate_ctxt *ctxt)
+{
+ struct vcpu *curr = current;
+
+ if ( curr->arch.vm_event.emul_read_data )
+ {
+ unsigned int safe_bytes = min_t(unsigned int, bytes,
+ curr->arch.vm_event.emul_read_data->size);
+
+ memcpy(p_new, curr->arch.vm_event.emul_read_data->data, safe_bytes);
+ }
+
+ return hvmemul_write(seg, offset, p_new, bytes, ctxt);
+}
+
static int hvmemul_rep_ins(
uint16_t src_port,
enum x86_segment dst_seg,
@@ -891,14 +934,37 @@ static int hvmemul_rep_outs(
!!(ctxt->regs->eflags & X86_EFLAGS_DF), NULL);
}
-static int hvmemul_rep_movs(
+static int hvmemul_rep_outs_set_context(
+ enum x86_segment src_seg,
+ unsigned long src_offset,
+ uint16_t dst_port,
+ unsigned int bytes_per_rep,
+ unsigned long *reps,
+ struct x86_emulate_ctxt *ctxt)
+{
+ struct vcpu *curr = current;
+ unsigned int safe_bytes;
+
+ if ( !curr->arch.vm_event.emul_read_data )
+ return X86EMUL_UNHANDLEABLE;
+
+ safe_bytes = min_t(unsigned int, bytes_per_rep,
+ curr->arch.vm_event.emul_read_data->size);
+
+ return hvmemul_do_pio(dst_port, reps, safe_bytes, 0, IOREQ_WRITE,
+ !!(ctxt->regs->eflags & X86_EFLAGS_DF),
+ curr->arch.vm_event.emul_read_data->data);
+}
+
+static int _hvmemul_rep_movs(
enum x86_segment src_seg,
unsigned long src_offset,
enum x86_segment dst_seg,
unsigned long dst_offset,
unsigned int bytes_per_rep,
unsigned long *reps,
- struct x86_emulate_ctxt *ctxt)
+ struct x86_emulate_ctxt *ctxt,
+ bool_t set_context)
{
struct hvm_emulate_ctxt *hvmemul_ctxt =
container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
@@ -981,7 +1047,19 @@ static int hvmemul_rep_movs(
*/
rc = hvm_copy_from_guest_phys(buf, sgpa, bytes);
if ( rc == HVMCOPY_okay )
+ {
+ struct vcpu *curr = current;
+
+ if ( unlikely(set_context) && curr->arch.vm_event.emul_read_data )
+ {
+ unsigned long safe_bytes = min_t(unsigned long, bytes,
+ curr->arch.vm_event.emul_read_data->size);
+
+ memcpy(buf, curr->arch.vm_event.emul_read_data->data, safe_bytes);
+ }
+
rc = hvm_copy_to_guest_phys(dgpa, buf, bytes);
+ }
xfree(buf);
@@ -1000,13 +1078,40 @@ static int hvmemul_rep_movs(
return X86EMUL_OKAY;
}
-static int hvmemul_rep_stos(
+static int hvmemul_rep_movs(
+ enum x86_segment src_seg,
+ unsigned long src_offset,
+ enum x86_segment dst_seg,
+ unsigned long dst_offset,
+ unsigned int bytes_per_rep,
+ unsigned long *reps,
+ struct x86_emulate_ctxt *ctxt)
+{
+ return _hvmemul_rep_movs(src_seg, src_offset, dst_seg, dst_offset,
+ bytes_per_rep, reps, ctxt, 0);
+}
+
+static int hvmemul_rep_movs_set_context(
+ enum x86_segment src_seg,
+ unsigned long src_offset,
+ enum x86_segment dst_seg,
+ unsigned long dst_offset,
+ unsigned int bytes_per_rep,
+ unsigned long *reps,
+ struct x86_emulate_ctxt *ctxt)
+{
+ return _hvmemul_rep_movs(src_seg, src_offset, dst_seg, dst_offset,
+ bytes_per_rep, reps, ctxt, 1);
+}
+
+static int _hvmemul_rep_stos(
void *p_data,
enum x86_segment seg,
unsigned long offset,
unsigned int bytes_per_rep,
unsigned long *reps,
- struct x86_emulate_ctxt *ctxt)
+ struct x86_emulate_ctxt *ctxt,
+ bool_t set_context)
{
struct hvm_emulate_ctxt *hvmemul_ctxt =
container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
@@ -1016,6 +1121,7 @@ static int hvmemul_rep_stos(
bool_t df = !!(ctxt->regs->eflags & X86_EFLAGS_DF);
int rc = hvmemul_virtual_to_linear(seg, offset, bytes_per_rep, reps,
hvm_access_write, hvmemul_ctxt, &addr);
+ struct vcpu *curr = current;
if ( rc == X86EMUL_OKAY )
{
@@ -1080,6 +1186,14 @@ static int hvmemul_rep_stos(
if ( df )
gpa -= bytes - bytes_per_rep;
+ if ( unlikely(set_context) && curr->arch.vm_event.emul_read_data )
+ {
+ unsigned long safe_bytes = min_t(unsigned long, bytes,
+ curr->arch.vm_event.emul_read_data->size);
+
+ memcpy(buf, curr->arch.vm_event.emul_read_data->data, safe_bytes);
+ }
+
rc = hvm_copy_to_guest_phys(gpa, buf, bytes);
if ( buf != p_data )
@@ -1107,6 +1221,31 @@ static int hvmemul_rep_stos(
}
}
+static int hvmemul_rep_stos(
+ void *p_data,
+ enum x86_segment seg,
+ unsigned long offset,
+ unsigned int bytes_per_rep,
+ unsigned long *reps,
+ struct x86_emulate_ctxt *ctxt)
+{
+ return _hvmemul_rep_stos(p_data, seg, offset, bytes_per_rep,
+ reps, ctxt, 0);
+}
+
+static int hvmemul_rep_stos_set_context(
+ void *p_data,
+ enum x86_segment seg,
+ unsigned long offset,
+ unsigned int bytes_per_rep,
+ unsigned long *reps,
+ struct x86_emulate_ctxt *ctxt)
+{
+ return _hvmemul_rep_stos(p_data, seg, offset, bytes_per_rep,
+ reps, ctxt, 1);
+}
+
static int hvmemul_read_segment(
enum x86_segment seg,
struct segment_register *reg,
@@ -1145,6 +1284,28 @@ static int hvmemul_read_io(
return hvmemul_do_pio(port, &reps, bytes, 0, IOREQ_READ, 0, val);
}
+static int hvmemul_read_io_set_context(
+ unsigned int port,
+ unsigned int bytes,
+ unsigned long *val,
+ struct x86_emulate_ctxt *ctxt)
+{
+ struct vcpu *curr = current;
+ unsigned int safe_bytes;
+
+ *val = 0;
+
+ if ( !curr->arch.vm_event.emul_read_data )
+ return X86EMUL_UNHANDLEABLE;
+
+ safe_bytes = min_t(unsigned int, bytes,
+ curr->arch.vm_event.emul_read_data->size);
+
+ memcpy(val, curr->arch.vm_event.emul_read_data->data, safe_bytes);
+
+ return X86EMUL_OKAY;
+}
+
static int hvmemul_write_io(
unsigned int port,
unsigned int bytes,
@@ -1408,6 +1569,32 @@ static const struct x86_emulate_ops hvm_emulate_ops_no_write = {
.invlpg = hvmemul_invlpg
};
+static const struct x86_emulate_ops hvm_emulate_ops_set_context = {
+ .read = hvmemul_read_set_context,
+ .insn_fetch = hvmemul_insn_fetch,
+ .write = hvmemul_write,
+ .cmpxchg = hvmemul_cmpxchg_set_context,
+ .rep_ins = hvmemul_rep_ins,
+ .rep_outs = hvmemul_rep_outs_set_context,
+ .rep_movs = hvmemul_rep_movs_set_context,
+ .rep_stos = hvmemul_rep_stos_set_context,
+ .read_segment = hvmemul_read_segment,
+ .write_segment = hvmemul_write_segment,
+ .read_io = hvmemul_read_io_set_context,
+ .write_io = hvmemul_write_io,
+ .read_cr = hvmemul_read_cr,
+ .write_cr = hvmemul_write_cr,
+ .read_msr = hvmemul_read_msr,
+ .write_msr = hvmemul_write_msr,
+ .wbinvd = hvmemul_wbinvd,
+ .cpuid = hvmemul_cpuid,
+ .inject_hw_exception = hvmemul_inject_hw_exception,
+ .inject_sw_interrupt = hvmemul_inject_sw_interrupt,
+ .get_fpu = hvmemul_get_fpu,
+ .put_fpu = hvmemul_put_fpu,
+ .invlpg = hvmemul_invlpg
+};
+
static int _hvm_emulate_one(struct hvm_emulate_ctxt *hvmemul_ctxt,
const struct x86_emulate_ops *ops)
{
@@ -1528,18 +1715,31 @@ int hvm_emulate_one_no_write(
return _hvm_emulate_one(hvmemul_ctxt, &hvm_emulate_ops_no_write);
}
-void hvm_mem_access_emulate_one(bool_t nowrite, unsigned int trapnr,
+int hvm_emulate_one_set_context(
+ struct hvm_emulate_ctxt *hvmemul_ctxt)
+{
+ return _hvm_emulate_one(hvmemul_ctxt, &hvm_emulate_ops_set_context);
+}
+
+void hvm_mem_access_emulate_one(enum emul_kind kind, unsigned int trapnr,
unsigned int errcode)
{
struct hvm_emulate_ctxt ctx = {{ 0 }};
- int rc;
+ int rc = X86EMUL_UNHANDLEABLE;
hvm_emulate_prepare(&ctx, guest_cpu_user_regs());
- if ( nowrite )
- rc = hvm_emulate_one_no_write(&ctx);
- else
+ switch ( kind ) {
+ case EMUL_KIND_NORMAL:
rc = hvm_emulate_one(&ctx);
+ break;
+ case EMUL_KIND_NOWRITE:
+ rc = hvm_emulate_one_no_write(&ctx);
+ break;
+ case EMUL_KIND_SET_CONTEXT:
+ rc = hvm_emulate_one_set_context(&ctx);
+ break;
+ }
switch ( rc )
{
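The "safe chunk" behaviour mentioned in the changelog is the common
pattern in the handlers above: the buffer is first filled from the real
source, and only the first min(bytes, size) bytes are then overwritten
with the response payload, so anything past the supplied data still
comes from genuine guest memory. Stripped of the Xen plumbing, the
effect is (standalone C, illustrative values only):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        uint8_t guest[8]  = { 1, 2, 3, 4, 5, 6, 7, 8 }; /* real contents  */
        uint8_t payload[] = { 0xaa, 0xbb, 0xcc };        /* emul_read_data */
        uint8_t buf[sizeof(guest)];
        size_t safe_bytes = sizeof(payload) < sizeof(buf)
                            ? sizeof(payload) : sizeof(buf);

        memcpy(buf, guest, sizeof(buf));  /* copy from the guest first   */
        memcpy(buf, payload, safe_bytes); /* then overwrite a safe chunk */

        for ( size_t i = 0; i < sizeof(buf); i++ )
            printf("%02x ", buf[i]);      /* aa bb cc 04 05 06 07 08 */
        printf("\n");

        return 0;
    }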
diff --git a/xen/arch/x86/hvm/event.c b/xen/arch/x86/hvm/event.c
index 53b9ca4..5341937 100644
--- a/xen/arch/x86/hvm/event.c
+++ b/xen/arch/x86/hvm/event.c
@@ -30,31 +30,31 @@ static void hvm_event_fill_regs(vm_event_request_t *req)
const struct cpu_user_regs *regs = guest_cpu_user_regs();
const struct vcpu *curr = current;
- req->regs.x86.rax = regs->eax;
- req->regs.x86.rcx = regs->ecx;
- req->regs.x86.rdx = regs->edx;
- req->regs.x86.rbx = regs->ebx;
- req->regs.x86.rsp = regs->esp;
- req->regs.x86.rbp = regs->ebp;
- req->regs.x86.rsi = regs->esi;
- req->regs.x86.rdi = regs->edi;
-
- req->regs.x86.r8 = regs->r8;
- req->regs.x86.r9 = regs->r9;
- req->regs.x86.r10 = regs->r10;
- req->regs.x86.r11 = regs->r11;
- req->regs.x86.r12 = regs->r12;
- req->regs.x86.r13 = regs->r13;
- req->regs.x86.r14 = regs->r14;
- req->regs.x86.r15 = regs->r15;
-
- req->regs.x86.rflags = regs->eflags;
- req->regs.x86.rip = regs->eip;
-
- req->regs.x86.msr_efer = curr->arch.hvm_vcpu.guest_efer;
- req->regs.x86.cr0 = curr->arch.hvm_vcpu.guest_cr[0];
- req->regs.x86.cr3 = curr->arch.hvm_vcpu.guest_cr[3];
- req->regs.x86.cr4 = curr->arch.hvm_vcpu.guest_cr[4];
+ req->data.regs.x86.rax = regs->eax;
+ req->data.regs.x86.rcx = regs->ecx;
+ req->data.regs.x86.rdx = regs->edx;
+ req->data.regs.x86.rbx = regs->ebx;
+ req->data.regs.x86.rsp = regs->esp;
+ req->data.regs.x86.rbp = regs->ebp;
+ req->data.regs.x86.rsi = regs->esi;
+ req->data.regs.x86.rdi = regs->edi;
+
+ req->data.regs.x86.r8 = regs->r8;
+ req->data.regs.x86.r9 = regs->r9;
+ req->data.regs.x86.r10 = regs->r10;
+ req->data.regs.x86.r11 = regs->r11;
+ req->data.regs.x86.r12 = regs->r12;
+ req->data.regs.x86.r13 = regs->r13;
+ req->data.regs.x86.r14 = regs->r14;
+ req->data.regs.x86.r15 = regs->r15;
+
+ req->data.regs.x86.rflags = regs->eflags;
+ req->data.regs.x86.rip = regs->eip;
+
+ req->data.regs.x86.msr_efer = curr->arch.hvm_vcpu.guest_efer;
+ req->data.regs.x86.cr0 = curr->arch.hvm_vcpu.guest_cr[0];
+ req->data.regs.x86.cr3 = curr->arch.hvm_vcpu.guest_cr[3];
+ req->data.regs.x86.cr4 = curr->arch.hvm_vcpu.guest_cr[4];
}
static int hvm_event_traps(uint8_t sync, vm_event_request_t *req)
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 1fd1194..0ab74bc 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -1369,49 +1369,49 @@ static void p2m_vm_event_fill_regs(vm_event_request_t *req)
/* Architecture-specific vmcs/vmcb bits */
hvm_funcs.save_cpu_ctxt(curr, &ctxt);
- req->regs.x86.rax = regs->eax;
- req->regs.x86.rcx = regs->ecx;
- req->regs.x86.rdx = regs->edx;
- req->regs.x86.rbx = regs->ebx;
- req->regs.x86.rsp = regs->esp;
- req->regs.x86.rbp = regs->ebp;
- req->regs.x86.rsi = regs->esi;
- req->regs.x86.rdi = regs->edi;
-
- req->regs.x86.r8 = regs->r8;
- req->regs.x86.r9 = regs->r9;
- req->regs.x86.r10 = regs->r10;
- req->regs.x86.r11 = regs->r11;
- req->regs.x86.r12 = regs->r12;
- req->regs.x86.r13 = regs->r13;
- req->regs.x86.r14 = regs->r14;
- req->regs.x86.r15 = regs->r15;
-
- req->regs.x86.rflags = regs->eflags;
- req->regs.x86.rip = regs->eip;
-
- req->regs.x86.dr7 = curr->arch.debugreg[7];
- req->regs.x86.cr0 = ctxt.cr0;
- req->regs.x86.cr2 = ctxt.cr2;
- req->regs.x86.cr3 = ctxt.cr3;
- req->regs.x86.cr4 = ctxt.cr4;
-
- req->regs.x86.sysenter_cs = ctxt.sysenter_cs;
- req->regs.x86.sysenter_esp = ctxt.sysenter_esp;
- req->regs.x86.sysenter_eip = ctxt.sysenter_eip;
-
- req->regs.x86.msr_efer = ctxt.msr_efer;
- req->regs.x86.msr_star = ctxt.msr_star;
- req->regs.x86.msr_lstar = ctxt.msr_lstar;
+ req->data.regs.x86.rax = regs->eax;
+ req->data.regs.x86.rcx = regs->ecx;
+ req->data.regs.x86.rdx = regs->edx;
+ req->data.regs.x86.rbx = regs->ebx;
+ req->data.regs.x86.rsp = regs->esp;
+ req->data.regs.x86.rbp = regs->ebp;
+ req->data.regs.x86.rsi = regs->esi;
+ req->data.regs.x86.rdi = regs->edi;
+
+ req->data.regs.x86.r8 = regs->r8;
+ req->data.regs.x86.r9 = regs->r9;
+ req->data.regs.x86.r10 = regs->r10;
+ req->data.regs.x86.r11 = regs->r11;
+ req->data.regs.x86.r12 = regs->r12;
+ req->data.regs.x86.r13 = regs->r13;
+ req->data.regs.x86.r14 = regs->r14;
+ req->data.regs.x86.r15 = regs->r15;
+
+ req->data.regs.x86.rflags = regs->eflags;
+ req->data.regs.x86.rip = regs->eip;
+
+ req->data.regs.x86.dr7 = curr->arch.debugreg[7];
+ req->data.regs.x86.cr0 = ctxt.cr0;
+ req->data.regs.x86.cr2 = ctxt.cr2;
+ req->data.regs.x86.cr3 = ctxt.cr3;
+ req->data.regs.x86.cr4 = ctxt.cr4;
+
+ req->data.regs.x86.sysenter_cs = ctxt.sysenter_cs;
+ req->data.regs.x86.sysenter_esp = ctxt.sysenter_esp;
+ req->data.regs.x86.sysenter_eip = ctxt.sysenter_eip;
+
+ req->data.regs.x86.msr_efer = ctxt.msr_efer;
+ req->data.regs.x86.msr_star = ctxt.msr_star;
+ req->data.regs.x86.msr_lstar = ctxt.msr_lstar;
hvm_get_segment_register(curr, x86_seg_fs, &seg);
- req->regs.x86.fs_base = seg.base;
+ req->data.regs.x86.fs_base = seg.base;
hvm_get_segment_register(curr, x86_seg_gs, &seg);
- req->regs.x86.gs_base = seg.base;
+ req->data.regs.x86.gs_base = seg.base;
hvm_get_segment_register(curr, x86_seg_cs, &seg);
- req->regs.x86.cs_arbytes = seg.attr.bytes;
+ req->data.regs.x86.cs_arbytes = seg.attr.bytes;
}
void p2m_mem_access_emulate_check(struct vcpu *v,
@@ -1466,6 +1466,10 @@ void p2m_mem_access_emulate_check(struct vcpu *v,
}
v->arch.vm_event.emulate_flags = violation ? rsp->flags : 0;
+
+ if ( rsp->flags & MEM_ACCESS_SET_EMUL_READ_DATA &&
+ v->arch.vm_event.emul_read_data )
+ *v->arch.vm_event.emul_read_data = rsp->data.emul_read_data;
}
}
@@ -1552,9 +1556,15 @@ bool_t p2m_mem_access_check(paddr_t gpa, unsigned long gla,
if ( v->arch.vm_event.emulate_flags )
{
- hvm_mem_access_emulate_one((v->arch.vm_event.emulate_flags &
- MEM_ACCESS_EMULATE_NOWRITE) != 0,
- TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE);
+ enum emul_kind kind = EMUL_KIND_NORMAL;
+
+ if ( v->arch.vm_event.emulate_flags & MEM_ACCESS_SET_EMUL_READ_DATA )
+ kind = EMUL_KIND_SET_CONTEXT;
+ else if ( v->arch.vm_event.emulate_flags & MEM_ACCESS_EMULATE_NOWRITE )
+ kind = EMUL_KIND_NOWRITE;
+
+ hvm_mem_access_emulate_one(kind, TRAP_invalid_op,
+ HVM_DELIVER_NO_ERROR_CODE);
v->arch.vm_event.emulate_flags = 0;
return 1;
diff --git a/xen/common/vm_event.c b/xen/common/vm_event.c
index 120a78a..d635b36 100644
--- a/xen/common/vm_event.c
+++ b/xen/common/vm_event.c
@@ -48,6 +48,7 @@ static int vm_event_enable(
{
int rc;
unsigned long ring_gfn = d->arch.hvm_domain.params[param];
+ struct vcpu *v;
/* Only one helper at a time. If the helper crashed,
* the ring is in an undefined state and so is the guest.
@@ -63,6 +64,21 @@ static int vm_event_enable(
vm_event_ring_lock_init(ved);
vm_event_ring_lock(ved);
+ for_each_vcpu( d, v )
+ {
+ if ( v->arch.vm_event.emul_read_data )
+ break;
+
+ v->arch.vm_event.emul_read_data =
+ xmalloc(struct vm_event_emul_read_data);
+
+ if ( !v->arch.vm_event.emul_read_data )
+ {
+ rc = -ENOMEM;
+ goto err;
+ }
+ }
+
rc = prepare_ring_for_helper(d, ring_gfn, &ved->ring_pg_struct,
&ved->ring_page);
if ( rc < 0 )
@@ -225,6 +241,13 @@ static int vm_event_disable(struct domain *d, struct vm_event_domain *ved)
destroy_ring_for_helper(&ved->ring_page,
ved->ring_pg_struct);
+
+ for_each_vcpu( d, v )
+ {
+ xfree(v->arch.vm_event.emul_read_data);
+ v->arch.vm_event.emul_read_data = NULL;
+ }
+
vm_event_ring_unlock(ved);
}
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index a3c117f..24011d5 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -10,6 +10,7 @@
#include <asm/mce.h>
#include <public/vcpu.h>
#include <public/hvm/hvm_info_table.h>
+#include <public/vm_event.h>
#define has_32bit_shinfo(d) ((d)->arch.has_32bit_shinfo)
#define is_pv_32bit_domain(d) ((d)->arch.is_32bit_pv)
@@ -504,6 +505,7 @@ struct arch_vcpu
uint32_t emulate_flags;
unsigned long gpa;
unsigned long eip;
+ struct vm_event_emul_read_data *emul_read_data;
} vm_event;
};
diff --git a/xen/include/asm-x86/hvm/emulate.h b/xen/include/asm-x86/hvm/emulate.h
index b3971c8..65ccfd8 100644
--- a/xen/include/asm-x86/hvm/emulate.h
+++ b/xen/include/asm-x86/hvm/emulate.h
@@ -34,11 +34,19 @@ struct hvm_emulate_ctxt {
uint32_t intr_shadow;
};
+enum emul_kind {
+ EMUL_KIND_NORMAL,
+ EMUL_KIND_NOWRITE,
+ EMUL_KIND_SET_CONTEXT
+};
+
int hvm_emulate_one(
struct hvm_emulate_ctxt *hvmemul_ctxt);
int hvm_emulate_one_no_write(
struct hvm_emulate_ctxt *hvmemul_ctxt);
-void hvm_mem_access_emulate_one(bool_t nowrite,
+int hvm_emulate_one_set_context(
+ struct hvm_emulate_ctxt *hvmemul_ctxt);
+void hvm_mem_access_emulate_one(enum emul_kind kind,
unsigned int trapnr,
unsigned int errcode);
void hvm_emulate_prepare(
diff --git a/xen/include/public/vm_event.h b/xen/include/public/vm_event.h
index 577e971..3223bb4 100644
--- a/xen/include/public/vm_event.h
+++ b/xen/include/public/vm_event.h
@@ -149,6 +149,13 @@ struct vm_event_regs_x86 {
* potentially having side effects (like memory mapped or port I/O) disabled.
*/
#define MEM_ACCESS_EMULATE_NOWRITE (1 << 7)
+/*
+ * Data is being sent back to the hypervisor in the event response, to be
+ * returned by the read function when emulating an instruction.
+ * This flag is only useful when combined with MEM_ACCESS_EMULATE or
+ * MEM_ACCESS_EMULATE_NOWRITE.
+ */
+#define MEM_ACCESS_SET_EMUL_READ_DATA (1 << 8)
struct vm_event_mem_access {
uint64_t gfn;
@@ -189,6 +196,12 @@ struct vm_event_sharing {
uint32_t _pad;
};
+struct vm_event_emul_read_data {
+ uint32_t size;
+ /* The struct is used in a union with vm_event_regs_x86. */
+ uint8_t data[sizeof(struct vm_event_regs_x86) - sizeof(uint32_t)];
+};
+
typedef struct vm_event_st {
uint32_t version; /* VM_EVENT_INTERFACE_VERSION */
uint32_t flags; /* VM_EVENT_FLAG_* */
@@ -206,8 +219,12 @@ typedef struct vm_event_st {
} u;
union {
- struct vm_event_regs_x86 x86;
- } regs;
+ union {
+ struct vm_event_regs_x86 x86;
+ } regs;
+
+ struct vm_event_emul_read_data emul_read_data;
+ } data;
} vm_event_request_t, vm_event_response_t;
DEFINE_RING_TYPES(vm_event, vm_event_request_t, vm_event_response_t);
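Note that vm_event_emul_read_data is sized so the new data union does
not grow the request/response structure: 4 bytes of "size" plus
sizeof(struct vm_event_regs_x86) - 4 bytes of payload exactly overlay
the regs member. A consumer wanting to verify that locally could add a
compile-time check along these lines (hypothetical, not part of the
patch; static_assert requires C11 and the public vm_event.h header):

    #include <assert.h>

    static_assert(sizeof(struct vm_event_emul_read_data) ==
                  sizeof(struct vm_event_regs_x86),
                  "emul_read_data must exactly overlay the regs union");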
--
1.7.9.5