
[Xen-changelog] [xen master] EPT: utilize GLA->GPA translation known for certain faults



commit ecb69533582e51999e5d76bce513be870222908f
Author:     Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Fri Aug 29 12:22:42 2014 +0200
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Fri Aug 29 12:22:42 2014 +0200

    EPT: utilize GLA->GPA translation known for certain faults
    
    Rather than doing the translation ourselves in __hvmemul_{read,write}(),
    leverage that we already know the GLA->GPA association for all faults
    other than those that occurred while translating the addresses of page
    tables.
    
    There is one intentional, but not necessarily obvious (and possibly
    subtle), adjustment to behavior: __hvmemul_read() no longer blindly
    bails on instruction fetches matching the MMIO GVA. The callers of
    handle_mmio_with_translation() now control the behavior via the
    struct npfec they pass, and it didn't seem right to bail there rather
    than just falling through to the unaccelerated path.
    
    Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
    Reviewed-by: Tim Deegan <tim@xxxxxxx>
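
For reference, the struct npfec this patch keys off is the nested-page-fault
error-code descriptor already used elsewhere in the tree. A minimal sketch of
the fields relevant to this patch (the layout shown is assumed, not taken
from this change; the real definition may carry further bits):

    /* Sketch of the npfec descriptor the patch relies on; the real
     * definition lives elsewhere in the Xen tree and may differ. */
    enum npfec_kind {
        npfec_kind_unknown,  /* no GLA information available */
        npfec_kind_in_gpt,   /* fault occurred during the guest PT walk */
        npfec_kind_with_gla  /* fault came with a valid GLA for the access */
    };

    struct npfec {
        unsigned int read_access:1;
        unsigned int write_access:1;
        unsigned int insn_fetch:1;
        unsigned int gla_valid:1;
        unsigned int kind:2;         /* enum npfec_kind */
    };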
---
 xen/arch/x86/hvm/emulate.c     |   10 ++++++----
 xen/arch/x86/hvm/hvm.c         |    4 ++--
 xen/arch/x86/hvm/io.c          |    9 +++++++--
 xen/arch/x86/mm/shadow/multi.c |   12 ++++++++++--
 xen/include/asm-x86/hvm/io.h   |    3 ++-
 xen/include/asm-x86/hvm/vcpu.h |    3 ++-
 6 files changed, 29 insertions(+), 12 deletions(-)

diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index eac159f..86cf432 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -481,10 +481,11 @@ static int __hvmemul_read(
         while ( off & (chunk - 1) )
             chunk >>= 1;
 
-    if ( unlikely(vio->mmio_gva == (addr & PAGE_MASK)) && vio->mmio_gva )
+    if ( ((access_type != hvm_access_insn_fetch
+           ? vio->mmio_access.read_access
+           : vio->mmio_access.insn_fetch)) &&
+         (vio->mmio_gva == (addr & PAGE_MASK)) )
     {
-        if ( access_type == hvm_access_insn_fetch )
-            return X86EMUL_UNHANDLEABLE;
         gpa = (((paddr_t)vio->mmio_gpfn << PAGE_SHIFT) | off);
         while ( (off + chunk) <= PAGE_SIZE )
         {
@@ -624,7 +625,8 @@ static int hvmemul_write(
         while ( off & (chunk - 1) )
             chunk >>= 1;
 
-    if ( unlikely(vio->mmio_gva == (addr & PAGE_MASK)) && vio->mmio_gva )
+    if ( vio->mmio_access.write_access &&
+         (vio->mmio_gva == (addr & PAGE_MASK)) )
     {
         gpa = (((paddr_t)vio->mmio_gpfn << PAGE_SHIFT) | off);
         while ( (off + chunk) <= PAGE_SIZE )
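
Taken out of context, the new fast-path gate in __hvmemul_read() and
hvmemul_write() reduces to a small predicate: reuse the cached GLA->GPA
mapping only when the recorded access rights cover the current access type
and the faulting page matches the cached one. A compilable sketch of that
predicate (the helper name and reduced types are hypothetical, mirroring the
hunks above):

    #include <stdbool.h>

    enum hvm_access_type {
        hvm_access_read, hvm_access_write, hvm_access_insn_fetch
    };

    struct mmio_cache {
        struct {
            unsigned int read_access:1, write_access:1, insn_fetch:1;
        } access;
        unsigned long gva;           /* page-aligned cached MMIO GVA */
    };

    #define PAGE_MASK (~0xfffUL)

    static bool can_use_cached_translation(const struct mmio_cache *c,
                                           unsigned long addr,
                                           enum hvm_access_type t)
    {
        /* Pick the permission bit matching this access type... */
        bool ok = (t == hvm_access_insn_fetch) ? c->access.insn_fetch
                : (t == hvm_access_write)      ? c->access.write_access
                                               : c->access.read_access;

        /* ...and require the faulting page to be the cached one. */
        return ok && c->gva == (addr & PAGE_MASK);
    }

This also makes the behavioral change from the commit message concrete: an
instruction fetch that doesn't have insn_fetch recorded simply fails the
predicate and falls through to the unaccelerated path, instead of returning
X86EMUL_UNHANDLEABLE as the old code did.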
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 0363714..83e6fae 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -2788,7 +2788,7 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
          && is_hvm_vcpu(v)
          && hvm_mmio_internal(gpa) )
     {
-        if ( !handle_mmio() )
+        if ( !handle_mmio_with_translation(gla, gpa >> PAGE_SHIFT, npfec) )
             hvm_inject_hw_exception(TRAP_gp_fault, 0);
         rc = 1;
         goto out;
@@ -2862,7 +2862,7 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
         if ( unlikely(is_pvh_vcpu(v)) )
             goto out;
 
-        if ( !handle_mmio() )
+        if ( !handle_mmio_with_translation(gla, gpa >> PAGE_SHIFT, npfec) )
             hvm_inject_hw_exception(TRAP_gp_fault, 0);
         rc = 1;
         goto out;
diff --git a/xen/arch/x86/hvm/io.c b/xen/arch/x86/hvm/io.c
index b2b7b27..9f565d6 100644
--- a/xen/arch/x86/hvm/io.c
+++ b/xen/arch/x86/hvm/io.c
@@ -95,7 +95,7 @@ int handle_mmio(void)
     if ( vio->io_state == HVMIO_awaiting_completion )
         vio->io_state = HVMIO_handle_mmio_awaiting_completion;
     else
-        vio->mmio_gva = 0;
+        vio->mmio_access = (struct npfec){};
 
     switch ( rc )
     {
@@ -124,9 +124,14 @@ int handle_mmio(void)
     return 1;
 }
 
-int handle_mmio_with_translation(unsigned long gva, unsigned long gpfn)
+int handle_mmio_with_translation(unsigned long gva, unsigned long gpfn,
+                                 struct npfec access)
 {
     struct hvm_vcpu_io *vio = &current->arch.hvm_vcpu.hvm_io;
+
+    vio->mmio_access = access.gla_valid &&
+                       access.kind == npfec_kind_with_gla
+                       ? access : (struct npfec){};
     vio->mmio_gva = gva & PAGE_MASK;
     vio->mmio_gpfn = gpfn;
     return handle_mmio();
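
The filter added here is what keeps the cache sound: only a fault that
carried a valid GLA for the access itself may seed it. A GLA reported for a
page-table-walk fault describes the walk, not the access, so caching it would
associate the wrong GVA with the MMIO frame. A self-contained mock-up of the
decision (types repeated from the sketch above so this compiles standalone):

    #include <stdio.h>

    enum npfec_kind {
        npfec_kind_unknown, npfec_kind_in_gpt, npfec_kind_with_gla
    };
    struct npfec {
        unsigned int read_access:1, write_access:1, insn_fetch:1;
        unsigned int gla_valid:1, kind:2;
    };

    /* Mirror of the caching decision in handle_mmio_with_translation(). */
    static struct npfec filter_for_cache(struct npfec access)
    {
        struct npfec none = { 0 };

        return (access.gla_valid && access.kind == npfec_kind_with_gla)
               ? access : none;
    }

    int main(void)
    {
        struct npfec direct = { .read_access = 1, .gla_valid = 1,
                                .kind = npfec_kind_with_gla };
        struct npfec walk   = { .read_access = 1, .gla_valid = 1,
                                .kind = npfec_kind_in_gpt };

        /* The direct fault seeds the cache; the PT-walk fault does not. */
        printf("direct: %u, walk: %u\n",
               filter_for_cache(direct).read_access,
               filter_for_cache(walk).read_access);
        return 0;
    }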
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index c6c9d10..225290e 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -2824,6 +2824,11 @@ static int sh_page_fault(struct vcpu *v,
     p2m_type_t p2mt;
     uint32_t rc;
     int version;
+    struct npfec access = {
+         .read_access = 1,
+         .gla_valid = 1,
+         .kind = npfec_kind_with_gla
+    };
 #if SHADOW_OPTIMIZATIONS & SHOPT_FAST_EMULATION
     int fast_emul = 0;
 #endif
@@ -2834,6 +2839,9 @@ static int sh_page_fault(struct vcpu *v,
 
     perfc_incr(shadow_fault);
 
+    if ( regs->error_code & PFEC_write_access )
+        access.write_access = 1;
+
 #if SHADOW_OPTIMIZATIONS & SHOPT_FAST_EMULATION
     /* If faulting frame is successfully emulated in last shadow fault
      * it's highly likely to reach same emulation action for this frame.
@@ -2935,7 +2943,7 @@ static int sh_page_fault(struct vcpu *v,
             SHADOW_PRINTK("fast path mmio %#"PRIpaddr"\n", gpa);
             reset_early_unshadow(v);
             trace_shadow_gen(TRC_SHADOW_FAST_MMIO, va);
-            return (handle_mmio_with_translation(va, gpa >> PAGE_SHIFT)
+            return (handle_mmio_with_translation(va, gpa >> PAGE_SHIFT, access)
                     ? EXCRET_fault_fixed : 0);
         }
         else
@@ -3424,7 +3432,7 @@ static int sh_page_fault(struct vcpu *v,
     paging_unlock(d);
     put_gfn(d, gfn_x(gfn));
     trace_shadow_gen(TRC_SHADOW_MMIO, va);
-    return (handle_mmio_with_translation(va, gpa >> PAGE_SHIFT)
+    return (handle_mmio_with_translation(va, gpa >> PAGE_SHIFT, access)
             ? EXCRET_fault_fixed : 0);
 
  not_a_shadow_fault:
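
On the shadow side faults arrive as ordinary #PF rather than EPT violations,
so sh_page_fault() synthesizes the npfec itself: a #PF always delivers the
faulting linear address via CR2, hence gla_valid and npfec_kind_with_gla are
set unconditionally, and write permission is lifted from the error code. A
standalone rendering of that construction (the PFEC bit value follows x86 #PF
semantics; types repeated from the earlier sketches):

    enum npfec_kind {
        npfec_kind_unknown, npfec_kind_in_gpt, npfec_kind_with_gla
    };
    struct npfec {
        unsigned int read_access:1, write_access:1, insn_fetch:1;
        unsigned int gla_valid:1, kind:2;
    };

    #define PFEC_write_access (1u << 1)  /* x86 #PF error code, W/R bit */

    static struct npfec npfec_from_shadow_fault(unsigned int error_code)
    {
        /* #PF delivers CR2, so the GLA is always valid for the access. */
        struct npfec access = {
            .read_access = 1,
            .gla_valid   = 1,
            .kind        = npfec_kind_with_gla,
        };

        if ( error_code & PFEC_write_access )
            access.write_access = 1;

        return access;
    }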
diff --git a/xen/include/asm-x86/hvm/io.h b/xen/include/asm-x86/hvm/io.h
index c7ac566..886a9d6 100644
--- a/xen/include/asm-x86/hvm/io.h
+++ b/xen/include/asm-x86/hvm/io.h
@@ -119,7 +119,8 @@ static inline void register_buffered_io_handler(
 void send_timeoffset_req(unsigned long timeoff);
 void send_invalidate_req(void);
 int handle_mmio(void);
-int handle_mmio_with_translation(unsigned long gva, unsigned long gpfn);
+int handle_mmio_with_translation(unsigned long gva, unsigned long gpfn,
+                                 struct npfec);
 int handle_pio(uint16_t port, unsigned int size, int dir);
 void hvm_interrupt_post(struct vcpu *v, int vector, int type);
 void hvm_io_assist(ioreq_t *p);
diff --git a/xen/include/asm-x86/hvm/vcpu.h b/xen/include/asm-x86/hvm/vcpu.h
index db37232..01e0665 100644
--- a/xen/include/asm-x86/hvm/vcpu.h
+++ b/xen/include/asm-x86/hvm/vcpu.h
@@ -54,8 +54,9 @@ struct hvm_vcpu_io {
      * HVM emulation:
      *  Virtual address @mmio_gva maps to MMIO physical frame @mmio_gpfn.
      *  The latter is known to be an MMIO frame (not RAM).
-     *  This translation is only valid if @mmio_gva is non-zero.
+     *  This translation is only valid for accesses as per @mmio_access.
      */
+    struct npfec        mmio_access;
     unsigned long       mmio_gva;
     unsigned long       mmio_gpfn;
 
--
generated by git-patchbot for /home/xen/git/xen.git#master
