
[Xen-changelog] [xen-unstable] nEPT: Use minimal permission for nested p2m


  • To: xen-changelog@xxxxxxxxxxxxxxxxxxx
  • From: Xen patchbot-unstable <patchbot@xxxxxxx>
  • Date: Wed, 16 Jan 2013 08:22:30 +0000
  • Delivery-date: Wed, 16 Jan 2013 08:22:36 +0000
  • List-id: "Change log for Mercurial (receive only)" <xen-changelog.lists.xen.org>

# HG changeset patch
# User Zhang Xiantao <xiantao.zhang@xxxxxxxxx>
# Date 1358245703 -3600
# Node ID 51095ed4f95131d7aa467810c3fae043c945220b
# Parent  1756a6782c750b40e136d197fcd07420c4e59b59
nEPT: Use minimal permission for nested p2m

Emulate the permission check for the nested p2m. The current solution is to
use the minimal permission, and once a permission violation is hit in L0,
determine whether it was caused by the guest EPT or the host EPT.

Signed-off-by: Zhang Xiantao <xiantao.zhang@xxxxxxxxx>
Acked-by: Tim Deegan <tim@xxxxxxx>
Acked-by: Jun Nakajima <jun.nakajima@xxxxxxxxx>
Acked-by: Eddie Dong <eddie.dong@xxxxxxxxx>
Committed-by: Jan Beulich <jbeulich@xxxxxxxx>
---
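
As an illustration (not part of the changeset), the sketch below shows the
permission-intersection idea in isolation. It assumes, as the patch does, that
the base p2m access types encode read/write/execute in their low three bits,
so the bitwise AND of the guest (L2->L1) and host (L1->L0) rights yields the
minimal combined permission. The enum values and the helper min_access() are
made up for this example and are not Xen identifiers.

/*
 * Standalone sketch of "use minimal permission": a right is granted in the
 * combined (nested) mapping only if both the guest EPT and the host p2m
 * grant it.  Bit 0 = read, bit 1 = write, bit 2 = execute.
 */
#include <stdio.h>

enum access {
    ACCESS_N   = 0,  /* no permissions */
    ACCESS_R   = 1,
    ACCESS_W   = 2,
    ACCESS_RW  = 3,
    ACCESS_X   = 4,
    ACCESS_RX  = 5,
    ACCESS_WX  = 6,
    ACCESS_RWX = 7,
};

/* Intersect the two levels' rights. */
static enum access min_access(enum access guest, enum access host)
{
    return (enum access)(guest & host);
}

int main(void)
{
    /* Guest EPT allows read+execute, host p2m allows read+write. */
    enum access combined = min_access(ACCESS_RX, ACCESS_RW);

    /* Only read survives, so the nested entry would be mapped read-only. */
    printf("combined access = %d (expect %d)\n", combined, ACCESS_R);
    return 0;
}

Run, this prints "combined access = 1 (expect 1)", i.e. read-only, which is
the minimal permission the nested p2m entry would receive in this scenario.
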


diff -r 1756a6782c75 -r 51095ed4f951 xen/arch/x86/hvm/svm/nestedsvm.c
--- a/xen/arch/x86/hvm/svm/nestedsvm.c  Tue Jan 15 11:23:05 2013 +0100
+++ b/xen/arch/x86/hvm/svm/nestedsvm.c  Tue Jan 15 11:28:23 2013 +0100
@@ -1177,7 +1177,7 @@ nsvm_vmcb_hap_enabled(struct vcpu *v)
  */
 int
 nsvm_hap_walk_L1_p2m(struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa,
-                     unsigned int *page_order,
+                     unsigned int *page_order, uint8_t *p2m_acc,
                      bool_t access_r, bool_t access_w, bool_t access_x)
 {
     uint32_t pfec;
diff -r 1756a6782c75 -r 51095ed4f951 xen/arch/x86/hvm/vmx/vvmx.c
--- a/xen/arch/x86/hvm/vmx/vvmx.c       Tue Jan 15 11:23:05 2013 +0100
+++ b/xen/arch/x86/hvm/vmx/vvmx.c       Tue Jan 15 11:28:23 2013 +0100
@@ -1532,7 +1532,7 @@ int nvmx_msr_write_intercept(unsigned in
  */
 int
 nvmx_hap_walk_L1_p2m(struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa,
-                     unsigned int *page_order,
+                     unsigned int *page_order, uint8_t *p2m_acc,
                      bool_t access_r, bool_t access_w, bool_t access_x)
 {
     int rc;
@@ -1542,7 +1542,7 @@ nvmx_hap_walk_L1_p2m(struct vcpu *v, pad
     uint32_t rwx_rights = (access_x << 2) | (access_w << 1) | access_r;
     struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
 
-    rc = nept_translate_l2ga(v, L2_gpa, page_order, rwx_rights, &gfn,
+    rc = nept_translate_l2ga(v, L2_gpa, page_order, rwx_rights, &gfn, p2m_acc,
                              &exit_qual, &exit_reason);
     switch ( rc )
     {
diff -r 1756a6782c75 -r 51095ed4f951 xen/arch/x86/mm/hap/nested_ept.c
--- a/xen/arch/x86/mm/hap/nested_ept.c  Tue Jan 15 11:23:05 2013 +0100
+++ b/xen/arch/x86/mm/hap/nested_ept.c  Tue Jan 15 11:28:23 2013 +0100
@@ -224,8 +224,8 @@ out:
 
 int nept_translate_l2ga(struct vcpu *v, paddr_t l2ga,
                         unsigned int *page_order, uint32_t rwx_acc,
-                        unsigned long *l1gfn, uint64_t *exit_qual,
-                        uint32_t *exit_reason)
+                        unsigned long *l1gfn, uint8_t *p2m_acc,
+                        uint64_t *exit_qual, uint32_t *exit_reason)
 {
     uint32_t rc, rwx_bits = 0;
     ept_walk_t gw;
@@ -262,6 +262,7 @@ int nept_translate_l2ga(struct vcpu *v, 
         if ( nept_permission_check(rwx_acc, rwx_bits) )
         {
             *l1gfn = gw.lxe[0].mfn;
+            *p2m_acc = (uint8_t)rwx_bits;
             break;
         }
         rc = EPT_TRANSLATE_VIOLATION;
diff -r 1756a6782c75 -r 51095ed4f951 xen/arch/x86/mm/hap/nested_hap.c
--- a/xen/arch/x86/mm/hap/nested_hap.c  Tue Jan 15 11:23:05 2013 +0100
+++ b/xen/arch/x86/mm/hap/nested_hap.c  Tue Jan 15 11:28:23 2013 +0100
@@ -143,13 +143,13 @@ nestedhap_fix_p2m(struct vcpu *v, struct
  */
 static int
 nestedhap_walk_L1_p2m(struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa,
-                      unsigned int *page_order,
+                      unsigned int *page_order, uint8_t *p2m_acc,
                       bool_t access_r, bool_t access_w, bool_t access_x)
 {
     ASSERT(hvm_funcs.nhvm_hap_walk_L1_p2m);
 
     return hvm_funcs.nhvm_hap_walk_L1_p2m(v, L2_gpa, L1_gpa, page_order,
-        access_r, access_w, access_x);
+        p2m_acc, access_r, access_w, access_x);
 }
 
 
@@ -159,16 +159,15 @@ nestedhap_walk_L1_p2m(struct vcpu *v, pa
  */
 static int
 nestedhap_walk_L0_p2m(struct p2m_domain *p2m, paddr_t L1_gpa, paddr_t *L0_gpa,
-                      p2m_type_t *p2mt,
+                      p2m_type_t *p2mt, p2m_access_t *p2ma,
                       unsigned int *page_order,
                       bool_t access_r, bool_t access_w, bool_t access_x)
 {
     mfn_t mfn;
-    p2m_access_t p2ma;
     int rc;
 
     /* walk L0 P2M table */
-    mfn = get_gfn_type_access(p2m, L1_gpa >> PAGE_SHIFT, p2mt, &p2ma, 
+    mfn = get_gfn_type_access(p2m, L1_gpa >> PAGE_SHIFT, p2mt, p2ma,
                               0, page_order);
 
     rc = NESTEDHVM_PAGEFAULT_MMIO;
@@ -207,12 +206,14 @@ nestedhvm_hap_nested_page_fault(struct v
     struct p2m_domain *p2m, *nested_p2m;
     unsigned int page_order_21, page_order_10, page_order_20;
     p2m_type_t p2mt_10;
+    p2m_access_t p2ma_10 = p2m_access_rwx;
+    uint8_t p2ma_21 = p2m_access_rwx;
 
     p2m = p2m_get_hostp2m(d); /* L0 p2m */
     nested_p2m = p2m_get_nestedp2m(v, nhvm_vcpu_p2m_base(v));
 
     /* walk the L1 P2M table */
-    rv = nestedhap_walk_L1_p2m(v, *L2_gpa, &L1_gpa, &page_order_21,
+    rv = nestedhap_walk_L1_p2m(v, *L2_gpa, &L1_gpa, &page_order_21, &p2ma_21,
         access_r, access_w, access_x);
 
     /* let caller to handle these two cases */
@@ -230,7 +231,7 @@ nestedhvm_hap_nested_page_fault(struct v
 
     /* ==> we have to walk L0 P2M */
     rv = nestedhap_walk_L0_p2m(p2m, L1_gpa, &L0_gpa,
-        &p2mt_10, &page_order_10,
+        &p2mt_10, &p2ma_10, &page_order_10,
         access_r, access_w, access_x);
 
     /* let upper level caller to handle these two cases */
@@ -251,10 +252,30 @@ nestedhvm_hap_nested_page_fault(struct v
 
     page_order_20 = min(page_order_21, page_order_10);
 
+    ASSERT(p2ma_10 <= p2m_access_n2rwx);
+    /*NOTE: if assert fails, needs to handle new access type here */
+
+    switch ( p2ma_10 )
+    {
+    case p2m_access_n ... p2m_access_rwx:
+        break;
+    case p2m_access_rx2rw:
+        p2ma_10 = p2m_access_rx;
+        break;
+    case p2m_access_n2rwx:
+        p2ma_10 = p2m_access_n;
+        break;
+    default:
+        p2ma_10 = p2m_access_n;
+        /* For safety, remove all permissions. */
+        gdprintk(XENLOG_ERR, "Unhandled p2m access type:%d\n", p2ma_10);
+    }
+    /* Use minimal permission for nested p2m. */
+    p2ma_10 &= (p2m_access_t)p2ma_21;
+
     /* fix p2m_get_pagetable(nested_p2m) */
     nestedhap_fix_p2m(v, nested_p2m, *L2_gpa, L0_gpa, page_order_20,
-        p2mt_10,
-        p2m_access_rwx /* FIXME: Should use minimum permission. */);
+        p2mt_10, p2ma_10);
 
     return NESTEDHVM_PAGEFAULT_DONE;
 }
diff -r 1756a6782c75 -r 51095ed4f951 xen/include/asm-x86/hvm/hvm.h
--- a/xen/include/asm-x86/hvm/hvm.h     Tue Jan 15 11:23:05 2013 +0100
+++ b/xen/include/asm-x86/hvm/hvm.h     Tue Jan 15 11:28:23 2013 +0100
@@ -187,8 +187,8 @@ struct hvm_function_table {
     /*Walk nested p2m  */
     int (*nhvm_hap_walk_L1_p2m)(struct vcpu *v, paddr_t L2_gpa,
                                 paddr_t *L1_gpa, unsigned int *page_order,
-                                bool_t access_r, bool_t access_w,
-                                bool_t access_x);
+                                uint8_t *p2m_acc, bool_t access_r,
+                                bool_t access_w, bool_t access_x);
 };
 
 extern struct hvm_function_table hvm_funcs;
diff -r 1756a6782c75 -r 51095ed4f951 xen/include/asm-x86/hvm/svm/nestedsvm.h
--- a/xen/include/asm-x86/hvm/svm/nestedsvm.h   Tue Jan 15 11:23:05 2013 +0100
+++ b/xen/include/asm-x86/hvm/svm/nestedsvm.h   Tue Jan 15 11:28:23 2013 +0100
@@ -134,7 +134,7 @@ void svm_vmexit_do_clgi(struct cpu_user_
 void svm_vmexit_do_stgi(struct cpu_user_regs *regs, struct vcpu *v);
 bool_t nestedsvm_gif_isset(struct vcpu *v);
 int nsvm_hap_walk_L1_p2m(struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa,
-                         unsigned int *page_order,
+                         unsigned int *page_order, uint8_t *p2m_acc,
                          bool_t access_r, bool_t access_w, bool_t access_x);
 
 #define NSVM_INTR_NOTHANDLED     3
diff -r 1756a6782c75 -r 51095ed4f951 xen/include/asm-x86/hvm/vmx/vvmx.h
--- a/xen/include/asm-x86/hvm/vmx/vvmx.h        Tue Jan 15 11:23:05 2013 +0100
+++ b/xen/include/asm-x86/hvm/vmx/vvmx.h        Tue Jan 15 11:28:23 2013 +0100
@@ -123,7 +123,7 @@ int nvmx_handle_vmxoff(struct cpu_user_r
 
 int
 nvmx_hap_walk_L1_p2m(struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa,
-                     unsigned int *page_order,
+                     unsigned int *page_order, uint8_t *p2m_acc,
                      bool_t access_r, bool_t access_w, bool_t access_x);
 /*
  * Virtual VMCS layout
@@ -208,7 +208,7 @@ int nvmx_n2_vmexit_handler(struct cpu_us
 
 int nept_translate_l2ga(struct vcpu *v, paddr_t l2ga,
                         unsigned int *page_order, uint32_t rwx_acc,
-                        unsigned long *l1gfn, uint64_t *exit_qual,
-                        uint32_t *exit_reason);
+                        unsigned long *l1gfn, uint8_t *p2m_acc,
+                        uint64_t *exit_qual, uint32_t *exit_reason);
 #endif /* __ASM_X86_HVM_VVMX_H__ */
 

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog