
[Xen-devel] [PATCH 3 of 3] x86/mm: New mem access type to log access


  • To: xen-devel@xxxxxxxxxxxxxxxxxxx
  • From: Andres Lagar-Cavilla <andres@xxxxxxxxxxxxxxxx>
  • Date: Thu, 01 Dec 2011 14:24:59 -0500
  • Cc: andres@xxxxxxxxxxxxxx, keir.xen@xxxxxxxxx, tim@xxxxxxx, JBeulich@xxxxxxxx, adin@xxxxxxxxxxxxxx
  • Delivery-date: Thu, 01 Dec 2011 19:28:20 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xensource.com>

 xen/arch/x86/hvm/hvm.c          |   1 +
 xen/arch/x86/mm/p2m-ept.c       |   1 +
 xen/arch/x86/mm/p2m.c           |  30 +++++++++++++++++++++---------
 xen/include/asm-x86/p2m.h       |   3 +++
 xen/include/public/hvm/hvm_op.h |   3 +++
 5 files changed, 29 insertions(+), 9 deletions(-)


This patch adds a new p2m access type, n2rwx. It allows implementing a "log
access" mode in the hypervisor, akin to log dirty but for all types of
accesses (read, write and execute). Faults caused by this access mode
automatically promote the access rights of the offending p2m entry to rwx,
place an event in the ring, and let the vcpu keep on executing.

Signed-off-by: Andres Lagar-Cavilla <andres@xxxxxxxxxxxxxxxx>
Signed-off-by: Adin Scannell <adin@xxxxxxxxxxx>
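
(Illustration only, not part of the patch: a minimal toolstack sketch of how
the new type could be applied to a range of guest frames. It assumes the
libxc wrapper xc_hvm_set_mem_access() for HVMOP_set_mem_access; the exact
prototype may differ between libxc versions.)

#include <stdint.h>
#include <stdio.h>
#include <xenctrl.h>

/* Put [first_pfn, first_pfn + nr) into "log access" mode: the first r/w/x
 * touch of each page faults, the hypervisor promotes the entry to rwx,
 * queues a mem_event, and the vcpu keeps running without being paused. */
static int log_access_range(xc_interface *xch, domid_t dom,
                            uint64_t first_pfn, uint64_t nr)
{
    int rc = xc_hvm_set_mem_access(xch, dom, HVMMEM_access_n2rwx,
                                   first_pfn, nr);
    if ( rc )
        fprintf(stderr, "set_mem_access(n2rwx) failed: %d\n", rc);
    return rc;
}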

diff -r 7213610b8003 -r 92a379fcef54 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -1250,6 +1250,7 @@ int hvm_hap_nested_page_fault(unsigned l
         switch (p2ma) 
         {
         case p2m_access_n:
+        case p2m_access_n2rwx:
         default:
             violation = access_r || access_w || access_x;
             break;
diff -r 7213610b8003 -r 92a379fcef54 xen/arch/x86/mm/p2m-ept.c
--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -111,6 +111,7 @@ static void ept_p2m_type_to_flags(ept_en
     switch (access) 
     {
         case p2m_access_n:
+        case p2m_access_n2rwx:
             entry->r = entry->w = entry->x = 0;
             break;
         case p2m_access_r:
diff -r 7213610b8003 -r 92a379fcef54 xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -1129,6 +1129,11 @@ bool_t p2m_mem_access_check(unsigned lon
         p2m_unlock(p2m);
         return 1;
     }
+    else if ( p2ma == p2m_access_n2rwx )
+    {
+        ASSERT(access_w || access_r || access_x);
+        p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2mt, p2m_access_rwx);
+    }
     p2m_unlock(p2m);
 
     /* Otherwise, check if there is a memory event listener, and send the message along */
@@ -1143,10 +1148,13 @@ bool_t p2m_mem_access_check(unsigned lon
         }
         else
         {
-            /* A listener is not required, so clear the access restrictions */
-            p2m_lock(p2m);
-            p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2mt, p2m_access_rwx);
-            p2m_unlock(p2m);
+            if ( p2ma != p2m_access_n2rwx )
+            {
+                /* A listener is not required, so clear the access restrictions */
+                p2m_lock(p2m);
+                p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2mt, p2m_access_rwx);
+                p2m_unlock(p2m);
+            }
             return 1;
         }
 
@@ -1157,9 +1165,12 @@ bool_t p2m_mem_access_check(unsigned lon
     req.type = MEM_EVENT_TYPE_ACCESS;
     req.reason = MEM_EVENT_REASON_VIOLATION;
 
-    /* Pause the current VCPU unconditionally */
-    vcpu_pause_nosync(v);
-    req.flags |= MEM_EVENT_FLAG_VCPU_PAUSED;    
+    /* Pause the current VCPU */
+    if ( p2ma != p2m_access_n2rwx )
+    {
+        vcpu_pause_nosync(v);
+        req.flags |= MEM_EVENT_FLAG_VCPU_PAUSED;
+    } 
 
     /* Send request to mem event */
     req.gfn = gfn;
@@ -1173,8 +1184,8 @@ bool_t p2m_mem_access_check(unsigned lon
     req.vcpu_id = v->vcpu_id;
 
     (void)mem_event_put_request(d, &d->mem_event->access, &req);
-    /* VCPU paused */
-    return 0;
+    /* VCPU may be paused, return whether we promoted automatically */
+    return (p2ma == p2m_access_n2rwx);
 }
 
 void p2m_mem_access_resume(struct domain *d)
@@ -1218,6 +1229,7 @@ int p2m_set_mem_access(struct domain *d,
         p2m_access_wx,
         p2m_access_rwx,
         p2m_access_rx2rw,
+        p2m_access_n2rwx,
         p2m->default_access,
     };
 
diff -r 7213610b8003 -r 92a379fcef54 xen/include/asm-x86/p2m.h
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -108,6 +108,9 @@ typedef enum {
     p2m_access_wx    = 6, 
     p2m_access_rwx   = 7,
     p2m_access_rx2rw = 8, /* Special: page goes from RX to RW on write */
+    p2m_access_n2rwx = 9, /* Special: page goes from N to RWX on access, *
+                           * generates an event but does not pause the
+                           * vcpu */
 
     /* NOTE: Assumed to be only 4 bits right now */
 } p2m_access_t;
diff -r 7213610b8003 -r 92a379fcef54 xen/include/public/hvm/hvm_op.h
--- a/xen/include/public/hvm/hvm_op.h
+++ b/xen/include/public/hvm/hvm_op.h
@@ -174,6 +174,9 @@ typedef enum {
     HVMMEM_access_rwx,
     HVMMEM_access_rx2rw,       /* Page starts off as r-x, but automatically
                                 * change to r-w on a write */
+    HVMMEM_access_n2rwx,       /* Log access: starts off as n, automatically 
+                                * goes to rwx, generating an event without
+                                * pausing the vcpu */
     HVMMEM_access_default      /* Take the domain default */
 } hvmmem_access_t;
 /* Notify that a region of memory is to have specific access types */
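
(Listener-side illustration, not part of the patch: handle_access_event() is
a hypothetical helper showing how a ring consumer might treat these requests,
assuming the public mem_event.h interface and the request fields used in the
p2m.c hunk above. The key behavioural point is that n2rwx faults do not set
MEM_EVENT_FLAG_VCPU_PAUSED, so there is no vcpu to unpause.)

#include <inttypes.h>
#include <stdio.h>
#include <xenctrl.h>
#include <xen/mem_event.h>

/* Hypothetical handler for one request pulled off the mem_event ring. */
static void handle_access_event(const mem_event_request_t *req)
{
    if ( req->type != MEM_EVENT_TYPE_ACCESS )
        return;

    /* For n2rwx the entry was already promoted to rwx by the hypervisor;
     * all the listener has to do is record the access. */
    printf("vcpu %u accessed gfn 0x%"PRIx64"\n",
           (unsigned)req->vcpu_id, (uint64_t)req->gfn);

    if ( req->flags & MEM_EVENT_FLAG_VCPU_PAUSED )
    {
        /* Only non-n2rwx violations pause the vcpu; those still require a
         * response on the ring followed by an unpause, as before. */
    }
}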

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel


 

