[Xen-devel] [PATCH RFC 2/2] x86/HVM: split page straddling emulated accesses in more cases
Assuming that consecutive linear addresses map to all RAM or all MMIO is
not correct. Nor is assuming that a page-straddling MMIO access will
reach the same emulating component for both parts of the access. If a
guest RAM read fails with HVMTRANS_bad_gfn_to_mfn and the access
straddles a page boundary, issue the accesses for the two parts
separately.
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
---
RFC: This clearly wants mirroring to the write path, and perhaps also
     to the fallback code on the RMW path. But I'd like to get a sense
     first of how welcome the general approach is. (A rough sketch of
     such a write-path mirror follows the diff below.)
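
As an aside, to make the split arithmetic concrete: below is a tiny
stand-alone illustration (not part of the patch) of how an access gets
split at a 4KiB page boundary. PAGE_SHIFT/PAGE_SIZE/PAGE_MASK follow the
usual x86 definitions, and handle_part() is a hypothetical stand-in for
the per-part handling (RAM copy or MMIO emulation):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

/* Hypothetical per-part handler; in the patch this role is played by
 * the guest RAM copy or by hvmemul_linear_mmio_read(), depending on
 * what the part in question maps to. */
static void handle_part(unsigned long addr, unsigned int bytes)
{
    printf("access %#lx, %u byte(s)\n", addr, bytes);
}

static void split_access(unsigned long addr, unsigned int bytes)
{
    unsigned int offset = addr & ~PAGE_MASK;

    if ( offset + bytes <= PAGE_SIZE )
    {
        /* The access fits in one page; no need to split. */
        handle_part(addr, bytes);
        return;
    }

    /* Split the access at the page boundary. */
    handle_part(addr, PAGE_SIZE - offset);
    handle_part(addr + (PAGE_SIZE - offset),
                bytes - (PAGE_SIZE - offset));
}

int main(void)
{
    split_access(0x10ffe, 4); /* straddles 0x11000: 2 + 2 bytes */
    return 0;
}

In the patch itself the second part is instead handled by linear_read()
recursing on the remainder, which also re-derives RAM vs. MMIO for each
part individually.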
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -1041,6 +1041,48 @@ static inline int hvmemul_linear_mmio_wr
                                       pfec, hvmemul_ctxt, translate);
 }
 
+static int linear_read(unsigned long addr, unsigned int bytes, void *p_data,
+                       uint32_t pfec, struct hvm_emulate_ctxt *hvmemul_ctxt)
+{
+    pagefault_info_t pfinfo;
+    int rc = hvm_copy_from_guest_linear(p_data, addr, bytes, pfec, &pfinfo);
+
+    switch ( rc )
+    {
+        unsigned int offset, part1;
+
+    case HVMTRANS_okay:
+        return X86EMUL_OKAY;
+
+    case HVMTRANS_bad_linear_to_gfn:
+        x86_emul_pagefault(pfinfo.ec, pfinfo.linear, &hvmemul_ctxt->ctxt);
+        return X86EMUL_EXCEPTION;
+
+    case HVMTRANS_bad_gfn_to_mfn:
+        if ( pfec & PFEC_insn_fetch )
+            return X86EMUL_UNHANDLEABLE;
+
+        offset = addr & ~PAGE_MASK;
+        if ( offset + bytes <= PAGE_SIZE )
+            return hvmemul_linear_mmio_read(addr, bytes, p_data, pfec,
+                                            hvmemul_ctxt, 0);
+
+        /* Split the access at the page boundary. */
+        part1 = PAGE_SIZE - offset;
+        rc = linear_read(addr, part1, p_data, pfec, hvmemul_ctxt);
+        if ( rc == X86EMUL_OKAY )
+            rc = linear_read(addr + part1, bytes - part1, p_data + part1,
+                             pfec, hvmemul_ctxt);
+        return rc;
+
+    case HVMTRANS_gfn_paged_out:
+    case HVMTRANS_gfn_shared:
+        return X86EMUL_RETRY;
+    }
+
+    return X86EMUL_UNHANDLEABLE;
+}
+
 static int __hvmemul_read(
     enum x86_segment seg,
     unsigned long offset,
@@ -1049,11 +1091,9 @@ static int __hvmemul_read(
     enum hvm_access_type access_type,
     struct hvm_emulate_ctxt *hvmemul_ctxt)
 {
-    struct vcpu *curr = current;
-    pagefault_info_t pfinfo;
     unsigned long addr, reps = 1;
     uint32_t pfec = PFEC_page_present;
-    struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
+    struct hvm_vcpu_io *vio = &current->arch.hvm_vcpu.hvm_io;
     int rc;
 
     if ( is_x86_system_segment(seg) )
@@ -1073,28 +1113,7 @@ static int __hvmemul_read(
          (vio->mmio_gla == (addr & PAGE_MASK)) )
         return hvmemul_linear_mmio_read(addr, bytes, p_data, pfec,
                                         hvmemul_ctxt, 1);
-    rc = hvm_copy_from_guest_linear(p_data, addr, bytes, pfec, &pfinfo);
-
-    switch ( rc )
-    {
-    case HVMTRANS_okay:
-        break;
-    case HVMTRANS_bad_linear_to_gfn:
-        x86_emul_pagefault(pfinfo.ec, pfinfo.linear, &hvmemul_ctxt->ctxt);
-        return X86EMUL_EXCEPTION;
-    case HVMTRANS_bad_gfn_to_mfn:
-        if ( access_type == hvm_access_insn_fetch )
-            return X86EMUL_UNHANDLEABLE;
-
-        return hvmemul_linear_mmio_read(addr, bytes, p_data, pfec,
-                                        hvmemul_ctxt, 0);
-    case HVMTRANS_gfn_paged_out:
-    case HVMTRANS_gfn_shared:
-        return X86EMUL_RETRY;
-    default:
-        return X86EMUL_UNHANDLEABLE;
-    }
-
-    return X86EMUL_OKAY;
+    return linear_read(addr, bytes, p_data, pfec, hvmemul_ctxt);
 }
 
 static int hvmemul_read(
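
Purely as a discussion aid for the RFC note above: a mirrored write
path might look as follows, assuming a hypothetical linear_write()
modelled one-to-one on linear_read(), with hvm_copy_to_guest_linear()
and hvmemul_linear_mmio_write() as the write-side counterparts of the
read-side helpers. This is a sketch of the idea, not part of the patch
(note the absence of an insn-fetch check, as instruction fetches are
reads):

static int linear_write(unsigned long addr, unsigned int bytes, void *p_data,
                        uint32_t pfec, struct hvm_emulate_ctxt *hvmemul_ctxt)
{
    pagefault_info_t pfinfo;
    int rc = hvm_copy_to_guest_linear(addr, p_data, bytes, pfec, &pfinfo);

    switch ( rc )
    {
        unsigned int offset, part1;

    case HVMTRANS_okay:
        return X86EMUL_OKAY;

    case HVMTRANS_bad_linear_to_gfn:
        x86_emul_pagefault(pfinfo.ec, pfinfo.linear, &hvmemul_ctxt->ctxt);
        return X86EMUL_EXCEPTION;

    case HVMTRANS_bad_gfn_to_mfn:
        offset = addr & ~PAGE_MASK;
        if ( offset + bytes <= PAGE_SIZE )
            return hvmemul_linear_mmio_write(addr, bytes, p_data, pfec,
                                             hvmemul_ctxt, 0);

        /* Split the access at the page boundary, as on the read path. */
        part1 = PAGE_SIZE - offset;
        rc = linear_write(addr, part1, p_data, pfec, hvmemul_ctxt);
        if ( rc == X86EMUL_OKAY )
            rc = linear_write(addr + part1, bytes - part1, p_data + part1,
                              pfec, hvmemul_ctxt);
        return rc;

    case HVMTRANS_gfn_paged_out:
    case HVMTRANS_gfn_shared:
        return X86EMUL_RETRY;
    }

    return X86EMUL_UNHANDLEABLE;
}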