[RFC PATCH 14/16] sev/emulate: Handle some non-emulable HVM paths



From: Andrei Semenov <andrei.semenov@xxxxxxxxxx>

Some code paths are not emulable under SEV, or need special handling.
With guest memory encrypted, the hypervisor cannot walk the guest page
tables, so linear address translation is unavailable to the emulator:
derive MMIO guest physical addresses from the gpfn latched at VMEXIT
time instead, restrict emulated accesses to a single page, make the
linear-address copy helpers fail as unhandleable, and reject
instruction re-fetches, cmpxchg and the rep ins/outs/movs/stos helpers.

Signed-off-by: Andrei Semenov <andrei.semenov@xxxxxxxxxx>
Signed-off-by: Teddy Astie <teddy.astie@xxxxxxxxxx>
---
 xen/arch/x86/hvm/emulate.c | 125 +++++++++++++++++++++++++++++-----
 xen/arch/x86/hvm/hvm.c     |  13 ++++
 2 files changed, 127 insertions(+), 11 deletions(-)
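
For readers without the rest of the series in front of them: the
is_sev_domain() predicate used throughout is introduced by an earlier
patch (hence the new <asm/hvm/svm/sev.h> include). Its definition is
not visible in this mail; a plausible minimal shape, with the flag name
being a guess, would be:

    /*
     * Assumed shape only -- the real predicate lives in an earlier
     * patch of this series and may differ.  "sev_enabled" is a
     * hypothetical per-domain flag, not an existing field.
     */
    static inline bool is_sev_domain(const struct domain *d)
    {
        return is_hvm_domain(d) && d->arch.hvm.svm.sev_enabled;
    }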

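The recurring pattern below is to bypass linear address translation
entirely and to rebuild the guest physical address from the gpfn
latched at VMEXIT time plus the page offset of the faulting linear
address. A sketch of that computation, factored into a helper for
illustration only (the patch open-codes it at each site):

    /*
     * Illustration only: reconstruct the guest physical address of an
     * MMIO access from the latched gpfn and the page offset of the
     * linear address.  Since only the offset within one page can be
     * recovered this way, any access crossing a page boundary has to
     * be rejected as unhandleable.
     */
    static paddr_t sev_mmio_gpa(const struct hvm_vcpu_io *hvio,
                                unsigned long addr)
    {
        return pfn_to_paddr(hvio->mmio_gpfn) | (addr & ~PAGE_MASK);
    }

This is also why hvmemul_linear_mmio_access() returns
X86EMUL_UNHANDLEABLE instead of continuing its chunk loop once a second
page would be touched.
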
diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index 6ed8e03475..7ac3be2d59 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -26,6 +26,7 @@
 #include <asm/hvm/hvm.h>
 #include <asm/hvm/monitor.h>
 #include <asm/hvm/support.h>
+#include <asm/hvm/svm/sev.h>
 #include <asm/iocap.h>
 #include <asm/vm_event.h>
 
@@ -689,6 +690,9 @@ static void *hvmemul_map_linear_addr(
         goto unhandleable;
     }
 
+    if ( is_sev_domain(curr->domain) && (nr_frames > 1) )
+        goto unhandleable;
+
     for ( i = 0; i < nr_frames; i++ )
     {
         enum hvm_translation_result res;
@@ -703,8 +707,16 @@ static void *hvmemul_map_linear_addr(
         /* Error checking.  Confirm that the current slot is clean. */
         ASSERT(mfn_x(*mfn) == 0);
 
-        res = hvm_translate_get_page(curr, addr, true, pfec,
+        if ( is_sev_domain(curr->domain) )
+        {
+            struct hvm_vcpu_io *hvio = &curr->arch.hvm.hvm_io;
+            unsigned long gpa = pfn_to_paddr(hvio->mmio_gpfn) | (addr & ~PAGE_MASK);
+            res = hvm_translate_get_page(curr, gpa, false, pfec,
                                      &pfinfo, &page, &gfn, &p2mt);
+        }
+        else
+            res = hvm_translate_get_page(curr, addr, true, pfec,
+                                         &pfinfo, &page, &gfn, &p2mt);
 
         switch ( res )
         {
@@ -1173,6 +1185,7 @@ static int hvmemul_linear_mmio_access(
                                                            dir, buffer_offset);
     paddr_t gpa;
     unsigned long one_rep = 1;
+    unsigned int chunk;
     int rc;
 
     if ( cache == NULL )
@@ -1183,21 +1196,50 @@ static int hvmemul_linear_mmio_access(
         ASSERT_UNREACHABLE();
         return X86EMUL_UNHANDLEABLE;
     }
+
+    chunk = min_t(unsigned int, size, PAGE_SIZE - offset);
 
     if ( known_gpfn )
         gpa = pfn_to_paddr(hvio->mmio_gpfn) | offset;
     else
     {
-        rc = hvmemul_linear_to_phys(gla, &gpa, size, &one_rep, pfec,
+        if ( is_sev_domain(current->domain) )
+            gpa = pfn_to_paddr(hvio->mmio_gpfn) | offset;
+        else
+        {
+            rc = hvmemul_linear_to_phys(gla, &gpa, chunk, &one_rep, pfec,
+                                        hvmemul_ctxt);
+            if ( rc != X86EMUL_OKAY )
+                return rc;
+        }
+
+        latch_linear_to_phys(hvio, gla, gpa, dir == IOREQ_WRITE);
+    }
+
+    for ( ;; )
+    {
+        rc = hvmemul_phys_mmio_access(cache, gpa, chunk, dir, buffer, buffer_offset);
+        if ( rc != X86EMUL_OKAY )
+            break;
+
+        gla += chunk;
+        buffer_offset += chunk;
+        size -= chunk;
+
+        if ( size == 0 )
+            break;
+
+        if ( is_sev_domain(current->domain) )
+            return X86EMUL_UNHANDLEABLE;
+
+        chunk = min_t(unsigned int, size, PAGE_SIZE);
+        rc = hvmemul_linear_to_phys(gla, &gpa, chunk, &one_rep, pfec,
                                     hvmemul_ctxt);
         if ( rc != X86EMUL_OKAY )
             return rc;
-
-        latch_linear_to_phys(hvio, gla, gpa, dir == IOREQ_WRITE);
     }
 
-    return hvmemul_phys_mmio_access(cache, gpa, size, dir, buffer,
-                                    buffer_offset);
+    return rc;
 }
 
 static inline int hvmemul_linear_mmio_read(
@@ -1254,6 +1296,9 @@ static int linear_read(unsigned long addr, unsigned int bytes, void *p_data,
     {
         unsigned int part1 = PAGE_SIZE - offset;
 
+        if ( is_sev_domain(current->domain) )
+            return X86EMUL_UNHANDLEABLE;
+
         /* Split the access at the page boundary. */
         rc = linear_read(addr, part1, p_data, pfec, hvmemul_ctxt);
         if ( rc != X86EMUL_OKAY )
@@ -1278,11 +1323,25 @@ static int linear_read(unsigned long addr, unsigned int bytes, void *p_data,
      * upon replay) the RAM access for anything that's ahead of or past MMIO,
      * i.e. in RAM.
      */
     cache = hvmemul_find_mmio_cache(hvio, start, IOREQ_READ, ~0);
     if ( !cache ||
          addr + bytes <= start + cache->skip ||
          addr >= start + cache->size )
-        rc = hvm_copy_from_guest_linear(p_data, addr, bytes, pfec, &pfinfo);
+    {
+        if ( is_sev_domain(current->domain) )
+        {
+            if ( hvio->mmio_gpfn )
+            {
+                paddr_t gpa;
+                gpa = pfn_to_paddr(hvio->mmio_gpfn) | (addr & ~PAGE_MASK);
+                rc = hvm_copy_from_guest_phys(p_data, gpa, bytes);
+            }
+            else
+                return X86EMUL_UNHANDLEABLE;
+        }
+        else
+            rc = hvm_copy_from_guest_linear(p_data, addr, bytes, pfec, &pfinfo);
+    }
 
     switch ( rc )
     {
@@ -1325,6 +1384,9 @@ static int linear_write(unsigned long addr, unsigned int bytes, void *p_data,
     {
         unsigned int part1 = PAGE_SIZE - offset;
 
+        if ( is_sev_domain(current->domain) )
+            return X86EMUL_UNHANDLEABLE;
+
         /* Split the access at the page boundary. */
         rc = linear_write(addr, part1, p_data, pfec, hvmemul_ctxt);
         if ( rc != X86EMUL_OKAY )
@@ -1340,9 +1402,23 @@ static int linear_write(unsigned long addr, unsigned int bytes, void *p_data,
     /* See commentary in linear_read(). */
     cache = hvmemul_find_mmio_cache(hvio, start, IOREQ_WRITE, ~0);
     if ( !cache ||
          addr + bytes <= start + cache->skip ||
          addr >= start + cache->size )
-        rc = hvm_copy_to_guest_linear(addr, p_data, bytes, pfec, &pfinfo);
+    {
+        if ( is_sev_domain(current->domain) )
+        {
+            if ( hvio->mmio_gpfn )
+            {
+                paddr_t gpa;
+                gpa = pfn_to_paddr(hvio->mmio_gpfn) | (addr & ~PAGE_MASK);
+                rc = hvm_copy_to_guest_phys(gpa, p_data, bytes, current);
+            }
+            else
+                return X86EMUL_UNHANDLEABLE;
+        }
+        else
+            rc = hvm_copy_to_guest_linear(addr, p_data, bytes, pfec, &pfinfo);
+    }
 
     switch ( rc )
     {
@@ -1430,7 +1506,12 @@ int cf_check hvmemul_insn_fetch(
     if ( !bytes ||
          unlikely((insn_off + bytes) > hvmemul_ctxt->insn_buf_bytes) )
     {
-        int rc = __hvmemul_read(x86_seg_cs, offset, p_data, bytes,
+        int rc;
+
+        if ( is_sev_domain(current->domain) )
+            return X86EMUL_UNHANDLEABLE;
+
+        rc = __hvmemul_read(x86_seg_cs, offset, p_data, bytes,
                                 hvm_access_insn_fetch, hvmemul_ctxt);
 
         if ( rc == X86EMUL_OKAY && bytes )
@@ -1485,6 +1566,7 @@ static int cf_check hvmemul_write(
     if ( !known_gla(addr, bytes, pfec) )
     {
         mapping = hvmemul_map_linear_addr(addr, bytes, pfec, hvmemul_ctxt);
+
         if ( IS_ERR(mapping) )
              return ~PTR_ERR(mapping);
     }
@@ -1719,6 +1801,9 @@ static int cf_check hvmemul_cmpxchg(
     int rc;
     void *mapping = NULL;
 
+    if ( is_sev_domain(current->domain) )
+        return X86EMUL_UNHANDLEABLE;
+
     rc = hvmemul_virtual_to_linear(
         seg, offset, bytes, NULL, hvm_access_write, hvmemul_ctxt, &addr);
     if ( rc != X86EMUL_OKAY )
@@ -1821,6 +1906,9 @@ static int cf_check hvmemul_rep_ins(
     p2m_type_t p2mt;
     int rc;
 
+    if ( is_sev_domain(current->domain) )
+        return X86EMUL_UNHANDLEABLE;
+
     rc = hvmemul_virtual_to_linear(
         dst_seg, dst_offset, bytes_per_rep, reps, hvm_access_write,
         hvmemul_ctxt, &addr);
@@ -1899,6 +1987,9 @@ static int cf_check hvmemul_rep_outs(
     p2m_type_t p2mt;
     int rc;
 
+    if ( is_sev_domain(current->domain) )
+        return X86EMUL_UNHANDLEABLE;
+
     if ( unlikely(hvmemul_ctxt->set_context) )
         return hvmemul_rep_outs_set_context(dst_port, bytes_per_rep, reps);
 
@@ -1944,6 +2035,9 @@ static int cf_check hvmemul_rep_movs(
     int rc, df = !!(ctxt->regs->eflags & X86_EFLAGS_DF);
     char *buf;
 
+    if ( is_sev_domain(current->domain) )
+        return X86EMUL_UNHANDLEABLE;
+
     rc = hvmemul_virtual_to_linear(
         src_seg, src_offset, bytes_per_rep, reps, hvm_access_read,
         hvmemul_ctxt, &saddr);
@@ -2109,9 +2203,13 @@ static int cf_check hvmemul_rep_stos(
     paddr_t gpa;
     p2m_type_t p2mt;
     bool df = ctxt->regs->eflags & X86_EFLAGS_DF;
-    int rc = hvmemul_virtual_to_linear(seg, offset, bytes_per_rep, reps,
-                                       hvm_access_write, hvmemul_ctxt, &addr);
+    int rc;
+
+    if ( is_sev_domain(current->domain) )
+        return X86EMUL_UNHANDLEABLE;
 
+    rc = hvmemul_virtual_to_linear(seg, offset, bytes_per_rep, reps,
+                                   hvm_access_write, hvmemul_ctxt, &addr);
     if ( rc != X86EMUL_OKAY )
         return rc;
 
@@ -2770,6 +2868,7 @@ static int _hvm_emulate_one(struct hvm_emulate_ctxt *hvmemul_ctxt,
     struct vcpu *curr = current;
     uint32_t new_intr_shadow;
     struct hvm_vcpu_io *hvio = &curr->arch.hvm.hvm_io;
+
     int rc;
 
     /*
@@ -2983,6 +3082,9 @@ void hvm_emulate_init_per_insn(
         unsigned int pfec = PFEC_page_present | PFEC_insn_fetch;
         unsigned long addr;
 
+        if ( is_sev_domain(current->domain) )
+            goto out;
+
         if ( hvmemul_ctxt->seg_reg[x86_seg_ss].dpl == 3 )
             pfec |= PFEC_user_mode;
 
@@ -3000,6 +3102,7 @@ void hvm_emulate_init_per_insn(
             sizeof(hvmemul_ctxt->insn_buf) : 0;
     }
 
+ out:
     hvmemul_ctxt->is_mem_access = false;
 }
 
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index e1bcf8e086..d3060329fb 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -56,6 +56,7 @@
 #include <asm/hvm/monitor.h>
 #include <asm/hvm/viridian.h>
 #include <asm/hvm/vm_event.h>
+#include <asm/hvm/svm/sev.h>
 #include <asm/altp2m.h>
 #include <asm/mtrr.h>
 #include <asm/apic.h>
@@ -3477,6 +3478,9 @@ enum hvm_translation_result hvm_copy_to_guest_linear(
     unsigned long addr, const void *buf, unsigned int size, uint32_t pfec,
     pagefault_info_t *pfinfo)
 {
+    if ( is_sev_domain(current->domain) )
+        return HVMTRANS_unhandleable;
+
     return __hvm_copy((void *)buf /* HVMCOPY_to_guest doesn't modify */,
                       addr, size, current, HVMCOPY_to_guest | HVMCOPY_linear,
                       PFEC_page_present | PFEC_write_access | pfec, pfinfo);
@@ -3486,6 +3490,9 @@ enum hvm_translation_result hvm_copy_from_guest_linear(
     void *buf, unsigned long addr, unsigned int size, uint32_t pfec,
     pagefault_info_t *pfinfo)
 {
+    if ( is_sev_domain(current->domain) )
+        return HVMTRANS_unhandleable;
+
     return __hvm_copy(buf, addr, size, current,
                       HVMCOPY_from_guest | HVMCOPY_linear,
                       PFEC_page_present | pfec, pfinfo);
@@ -3495,6 +3502,9 @@ enum hvm_translation_result hvm_copy_from_vcpu_linear(
     void *buf, unsigned long addr, unsigned int size, struct vcpu *v,
     unsigned int pfec)
 {
+    if ( is_sev_domain(v->domain) )
+        return HVMTRANS_unhandleable;
+
     return __hvm_copy(buf, addr, size, v,
                       HVMCOPY_from_guest | HVMCOPY_linear,
                       PFEC_page_present | pfec, NULL);
@@ -3522,6 +3532,9 @@ unsigned int clear_user_hvm(void *to, unsigned int len)
 {
     int rc;
 
+    if ( is_sev_domain(current->domain) )
+        return len; /* nothing cleared */
+
     if ( current->hcall_compat && is_compat_arg_xlat_range(to, len) )
     {
         memset(to, 0x00, len);
-- 
2.49.0



Teddy Astie | Vates XCP-ng Developer

XCP-ng & Xen Orchestra - Vates solutions

web: https://vates.tech




 

