
[Xen-devel] [PATCH v2 2/4] x86/mm: use optional cache in guest_walk_tables()



The caching isn't actually implemented here; this is just setting the
stage: guest_walk_tables() learns to consult (and fill) an optional
cache at every level of the walk, while the hvmemul_read_cache() /
hvmemul_write_cache() stubs added here always miss and discard writes,
respectively.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
---
v2: Don't wrongly use top_gfn for non-root gpa calculation. Re-write
    cache entries after setting A/D bits (an alternative would be to
    suppress their setting upon cache hits).
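
[For illustration only, not part of the patch: the hooks are left as
stubs here, with a real implementation to follow later in the series.
The sketch below shows one possible shape of the intended semantics;
the entry count, field names, and replacement policy are all invented
for the example.]

/* Illustrative sketch only -- not the actual implementation. */
struct hvmemul_cache_ent {
    paddr_t gpa;        /* guest physical address the PTE was read from */
    unsigned int level; /* page table level of the walk */
    uint64_t data;      /* cached PTE contents (PTEs are at most 8 bytes) */
    bool valid;
};

#define HVMEMUL_CACHE_ENTRIES 8 /* arbitrary size, invented for the sketch */

struct hvmemul_cache {
    struct hvmemul_cache_ent ent[HVMEMUL_CACHE_ENTRIES];
};

bool hvmemul_read_cache(const struct hvmemul_cache *cache, paddr_t gpa,
                        unsigned int level, void *buffer, unsigned int size)
{
    unsigned int i;

    ASSERT(size <= sizeof(cache->ent[0].data));

    for ( i = 0; i < ARRAY_SIZE(cache->ent); ++i )
        if ( cache->ent[i].valid && cache->ent[i].gpa == gpa &&
             cache->ent[i].level == level )
        {
            memcpy(buffer, &cache->ent[i].data, size);
            return true; /* hit: caller skips the memory read */
        }

    return false; /* miss: caller reads the PTE and calls the write hook */
}

void hvmemul_write_cache(struct hvmemul_cache *cache, paddr_t gpa,
                         unsigned int level, const void *buffer,
                         unsigned int size)
{
    unsigned int i, slot = ARRAY_SIZE(cache->ent);

    ASSERT(size <= sizeof(cache->ent[0].data));

    for ( i = 0; i < ARRAY_SIZE(cache->ent); ++i )
    {
        if ( cache->ent[i].valid && cache->ent[i].gpa == gpa &&
             cache->ent[i].level == level )
        {
            slot = i; /* existing entry: update in place */
            break;
        }
        if ( !cache->ent[i].valid && slot == ARRAY_SIZE(cache->ent) )
            slot = i; /* remember the first free slot */
    }

    if ( slot == ARRAY_SIZE(cache->ent) )
        return; /* full: this sketch simply drops the entry */

    cache->ent[slot].gpa = gpa;
    cache->ent[slot].level = level;
    cache->ent[slot].data = 0;
    memcpy(&cache->ent[slot].data, buffer, size);
    cache->ent[slot].valid = true;
}

With something along these lines, re-writing cache entries after
set_ad_bits() (see the v2 note above) ensures that a later lookup of
the same gpa/level pair observes the PTE with its A/D bits already set.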

--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -2664,6 +2664,18 @@ void hvm_dump_emulation_state(const char
            hvmemul_ctxt->insn_buf);
 }
 
+bool hvmemul_read_cache(const struct hvmemul_cache *cache, paddr_t gpa,
+                        unsigned int level, void *buffer, unsigned int size)
+{
+    return false;
+}
+
+void hvmemul_write_cache(struct hvmemul_cache *cache, paddr_t gpa,
+                         unsigned int level, const void *buffer,
+                         unsigned int size)
+{
+}
+
 /*
  * Local variables:
  * mode: C
--- a/xen/arch/x86/mm/guest_walk.c
+++ b/xen/arch/x86/mm/guest_walk.c
@@ -92,8 +92,13 @@ guest_walk_tables(struct vcpu *v, struct
 #if GUEST_PAGING_LEVELS >= 4 /* 64-bit only... */
     guest_l3e_t *l3p = NULL;
     guest_l4e_t *l4p;
+    paddr_t l4gpa;
+#endif
+#if GUEST_PAGING_LEVELS >= 3 /* PAE or 64... */
+    paddr_t l3gpa;
 #endif
     uint32_t gflags, rc;
+    paddr_t l1gpa = 0, l2gpa = 0;
     unsigned int leaf_level;
     p2m_query_t qt = P2M_ALLOC | P2M_UNSHARE;
 
@@ -134,7 +139,15 @@ guest_walk_tables(struct vcpu *v, struct
     /* Get the l4e from the top level table and check its flags*/
     gw->l4mfn = top_mfn;
     l4p = (guest_l4e_t *) top_map;
-    gw->l4e = l4p[guest_l4_table_offset(gla)];
+    l4gpa = gfn_to_gaddr(top_gfn) +
+            guest_l4_table_offset(gla) * sizeof(gw->l4e);
+    if ( !cache ||
+         !hvmemul_read_cache(cache, l4gpa, 4, &gw->l4e, sizeof(gw->l4e)) )
+    {
+        gw->l4e = l4p[guest_l4_table_offset(gla)];
+        if ( cache )
+            hvmemul_write_cache(cache, l4gpa, 4, &gw->l4e, sizeof(gw->l4e));
+    }
     gflags = guest_l4e_get_flags(gw->l4e);
     if ( !(gflags & _PAGE_PRESENT) )
         goto out;
@@ -164,7 +177,15 @@ guest_walk_tables(struct vcpu *v, struct
     }
 
     /* Get the l3e and check its flags*/
-    gw->l3e = l3p[guest_l3_table_offset(gla)];
+    l3gpa = gfn_to_gaddr(guest_l4e_get_gfn(gw->l4e)) +
+            guest_l3_table_offset(gla) * sizeof(gw->l3e);
+    if ( !cache ||
+         !hvmemul_read_cache(cache, l3gpa, 3, &gw->l3e, sizeof(gw->l3e)) )
+    {
+        gw->l3e = l3p[guest_l3_table_offset(gla)];
+        if ( cache )
+            hvmemul_write_cache(cache, l3gpa, 3, &gw->l3e, sizeof(gw->l3e));
+    }
     gflags = guest_l3e_get_flags(gw->l3e);
     if ( !(gflags & _PAGE_PRESENT) )
         goto out;
@@ -216,7 +237,16 @@ guest_walk_tables(struct vcpu *v, struct
 #else /* PAE only... */
 
     /* Get the l3e and check its flag */
-    gw->l3e = ((guest_l3e_t *)top_map)[guest_l3_table_offset(gla)];
+    l3gpa = gfn_to_gaddr(top_gfn) + ((unsigned long)top_map & ~PAGE_MASK) +
+            guest_l3_table_offset(gla) * sizeof(gw->l3e);
+    if ( !cache ||
+         !hvmemul_read_cache(cache, l3gpa, 3, &gw->l3e, sizeof(gw->l3e)) )
+    {
+        gw->l3e = ((guest_l3e_t *)top_map)[guest_l3_table_offset(gla)];
+        if ( cache )
+            hvmemul_write_cache(cache, l3gpa, 3, &gw->l3e, sizeof(gw->l3e));
+    }
+
     gflags = guest_l3e_get_flags(gw->l3e);
     if ( !(gflags & _PAGE_PRESENT) )
         goto out;
@@ -242,18 +272,26 @@ guest_walk_tables(struct vcpu *v, struct
         goto out;
     }
 
-    /* Get the l2e */
-    gw->l2e = l2p[guest_l2_table_offset(gla)];
+    l2gpa = gfn_to_gaddr(guest_l3e_get_gfn(gw->l3e));
 
 #else /* 32-bit only... */
 
-    /* Get l2e from the top level table */
     gw->l2mfn = top_mfn;
     l2p = (guest_l2e_t *) top_map;
-    gw->l2e = l2p[guest_l2_table_offset(gla)];
+    l2gpa = gfn_to_gaddr(top_gfn);
 
 #endif /* All levels... */
 
+    /* Get the l2e */
+    l2gpa += guest_l2_table_offset(gla) * sizeof(gw->l2e);
+    if ( !cache ||
+         !hvmemul_read_cache(cache, l2gpa, 2, &gw->l2e, sizeof(gw->l2e)) )
+    {
+        gw->l2e = l2p[guest_l2_table_offset(gla)];
+        if ( cache )
+            hvmemul_write_cache(cache, l2gpa, 2, &gw->l2e, sizeof(gw->l2e));
+    }
+
     /* Check the l2e flags. */
     gflags = guest_l2e_get_flags(gw->l2e);
     if ( !(gflags & _PAGE_PRESENT) )
@@ -335,7 +373,17 @@ guest_walk_tables(struct vcpu *v, struct
         gw->pfec |= rc & PFEC_synth_mask;
         goto out;
     }
-    gw->l1e = l1p[guest_l1_table_offset(gla)];
+
+    l1gpa = gfn_to_gaddr(guest_l2e_get_gfn(gw->l2e)) +
+            guest_l1_table_offset(gla) * sizeof(gw->l1e);
+    if ( !cache ||
+         !hvmemul_read_cache(cache, l1gpa, 1, &gw->l1e, sizeof(gw->l1e)) )
+    {
+        gw->l1e = l1p[guest_l1_table_offset(gla)];
+        if ( cache )
+            hvmemul_write_cache(cache, l1gpa, 1, &gw->l1e, sizeof(gw->l1e));
+    }
+
     gflags = guest_l1e_get_flags(gw->l1e);
     if ( !(gflags & _PAGE_PRESENT) )
         goto out;
@@ -446,22 +494,38 @@ guest_walk_tables(struct vcpu *v, struct
     case 1:
         if ( set_ad_bits(&l1p[guest_l1_table_offset(gla)].l1, &gw->l1e.l1,
                          (walk & PFEC_write_access)) )
+        {
             paging_mark_dirty(d, gw->l1mfn);
+            if ( cache )
+                hvmemul_write_cache(cache, l1gpa, 1, &gw->l1e, sizeof(gw->l1e));
+        }
         /* Fallthrough */
     case 2:
         if ( set_ad_bits(&l2p[guest_l2_table_offset(gla)].l2, &gw->l2e.l2,
                          (walk & PFEC_write_access) && leaf_level == 2) )
+        {
             paging_mark_dirty(d, gw->l2mfn);
+            if ( cache )
+                hvmemul_write_cache(cache, l2gpa, 2, &gw->l2e, sizeof(gw->l2e));
+        }
         /* Fallthrough */
 #if GUEST_PAGING_LEVELS == 4 /* 64-bit only... */
     case 3:
         if ( set_ad_bits(&l3p[guest_l3_table_offset(gla)].l3, &gw->l3e.l3,
                          (walk & PFEC_write_access) && leaf_level == 3) )
+        {
             paging_mark_dirty(d, gw->l3mfn);
+            if ( cache )
+                hvmemul_write_cache(cache, l3gpa, 3, &gw->l3e, sizeof(gw->l3e));
+        }
 
         if ( set_ad_bits(&l4p[guest_l4_table_offset(gla)].l4, &gw->l4e.l4,
                          false) )
+        {
             paging_mark_dirty(d, gw->l4mfn);
+            if ( cache )
+                hvmemul_write_cache(cache, l4gpa, 4, &gw->l4e, sizeof(gw->l4e));
+        }
 #endif
     }
 
--- a/xen/include/asm-x86/hvm/emulate.h
+++ b/xen/include/asm-x86/hvm/emulate.h
@@ -98,6 +98,13 @@ int hvmemul_do_pio_buffer(uint16_t port,
                           uint8_t dir,
                           void *buffer);
 
+struct hvmemul_cache;
+bool hvmemul_read_cache(const struct hvmemul_cache *, paddr_t gpa,
+                        unsigned int level, void *buffer, unsigned int size);
+void hvmemul_write_cache(struct hvmemul_cache *, paddr_t gpa,
+                         unsigned int level, const void *buffer,
+                         unsigned int size);
+
 void hvm_dump_emulation_state(const char *loglvl, const char *prefix,
                               struct hvm_emulate_ctxt *hvmemul_ctxt, int rc);
 
