
[Xen-changelog] [xen-unstable] xenpaging: use p2m->get_entry() in p2m_mem_paging functions



# HG changeset patch
# User Olaf Hering <olaf@xxxxxxxxx>
# Date 1317900797 -3600
# Node ID ecab267b85ef0f6721a760ca1b1966d8cef1c7c6
# Parent  0b66e6450ffe6823d8b323ef4248b38fe7372d54
xenpaging: use p2m->get_entry() in p2m_mem_paging functions

Use p2m->get_entry() in the p2m_mem_paging functions. This preserves the
p2m_access type when the gfn is updated with set_p2m_entry().
It is also preparation for locking fixes in a subsequent patch.

Signed-off-by: Olaf Hering <olaf@xxxxxxxxx>
Acked-by: Tim Deegan <tim@xxxxxxx>
Committed-by: Tim Deegan <tim@xxxxxxx>
---
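
For reference, the change applied in each p2m_mem_paging_* function below
follows a single pattern, sketched here in condensed form (surrounding logic
and error handling elided, so this is illustrative rather than a literal
excerpt of the tree):

    /* Before: gfn_to_mfn() does not return the per-gfn access type, so the
     * entry was rewritten with the domain-wide default access. */
    mfn = gfn_to_mfn(d, gfn, &p2mt);
    set_p2m_entry(p2m, gfn, mfn, 0, p2m_ram_paging_out, p2m->default_access);

    /* After: query the entry directly so the existing p2m_access_t can be
     * written back unchanged when the type is updated. */
    p2m_access_t a;
    mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, p2m_query, NULL);
    set_p2m_entry(p2m, gfn, mfn, 0, p2m_ram_paging_out, a);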


diff -r 0b66e6450ffe -r ecab267b85ef xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c     Tue Oct 04 14:18:30 2011 +0200
+++ b/xen/arch/x86/mm/p2m.c     Thu Oct 06 12:33:17 2011 +0100
@@ -671,10 +671,11 @@
     struct page_info *page;
     struct p2m_domain *p2m = p2m_get_hostp2m(d);
     p2m_type_t p2mt;
+    p2m_access_t a;
     mfn_t mfn;
     int ret;
 
-    mfn = gfn_to_mfn(p2m->domain, gfn, &p2mt);
+    mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, p2m_query, NULL);
 
     /* Check if mfn is valid */
     ret = -EINVAL;
@@ -701,7 +702,7 @@
 
     /* Fix p2m entry */
     p2m_lock(p2m);
-    set_p2m_entry(p2m, gfn, mfn, 0, p2m_ram_paging_out, p2m->default_access);
+    set_p2m_entry(p2m, gfn, mfn, 0, p2m_ram_paging_out, a);
     audit_p2m(p2m, 1);
     p2m_unlock(p2m);
 
@@ -715,11 +716,12 @@
 {
     struct page_info *page;
     p2m_type_t p2mt;
+    p2m_access_t a;
     mfn_t mfn;
     struct p2m_domain *p2m = p2m_get_hostp2m(d);
 
     /* Get mfn */
-    mfn = gfn_to_mfn(d, gfn, &p2mt);
+    mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, p2m_query, NULL);
     if ( unlikely(!mfn_valid(mfn)) )
         return -EINVAL;
 
@@ -738,8 +740,7 @@
 
     /* Remove mapping from p2m table */
     p2m_lock(p2m);
-    set_p2m_entry(p2m, gfn, _mfn(INVALID_MFN), 0, 
-                  p2m_ram_paged, p2m->default_access);
+    set_p2m_entry(p2m, gfn, _mfn(INVALID_MFN), 0, p2m_ram_paged, a);
     audit_p2m(p2m, 1);
     p2m_unlock(p2m);
 
@@ -775,6 +776,7 @@
     struct vcpu *v = current;
     mem_event_request_t req;
     p2m_type_t p2mt;
+    p2m_access_t a;
     struct p2m_domain *p2m = p2m_get_hostp2m(d);
 
     /* Check that there's space on the ring for this request */
@@ -787,12 +789,12 @@
     /* Fix p2m mapping */
     /* XXX: It seems inefficient to have this here, as it's only needed
      *      in one case (ept guest accessing paging out page) */
-    gfn_to_mfn(d, gfn, &p2mt);
+    p2m->get_entry(p2m, gfn, &p2mt, &a, p2m_query, NULL);
     if ( p2mt == p2m_ram_paged )
     {
         p2m_lock(p2m);
         set_p2m_entry(p2m, gfn, _mfn(INVALID_MFN), 0, 
-                      p2m_ram_paging_in_start, p2m->default_access);
+                      p2m_ram_paging_in_start, a);
         audit_p2m(p2m, 1);
         p2m_unlock(p2m);
     }
@@ -821,8 +823,11 @@
 int p2m_mem_paging_prep(struct domain *d, unsigned long gfn)
 {
     struct page_info *page;
+    p2m_type_t p2mt;
+    p2m_access_t a;
     struct p2m_domain *p2m = p2m_get_hostp2m(d);
 
+    p2m->get_entry(p2m, gfn, &p2mt, &a, p2m_query, NULL);
     /* Get a free page */
     page = alloc_domheap_page(p2m->domain, 0);
     if ( unlikely(page == NULL) )
@@ -830,7 +835,7 @@
 
     /* Fix p2m mapping */
     p2m_lock(p2m);
-    set_p2m_entry(p2m, gfn, page_to_mfn(page), 0, p2m_ram_paging_in, p2m->default_access);
+    set_p2m_entry(p2m, gfn, page_to_mfn(page), 0, p2m_ram_paging_in, a);
     audit_p2m(p2m, 1);
     p2m_unlock(p2m);
 
@@ -844,6 +849,7 @@
     struct p2m_domain *p2m = p2m_get_hostp2m(d);
     mem_event_response_t rsp;
     p2m_type_t p2mt;
+    p2m_access_t a;
     mfn_t mfn;
 
     /* Pull the response off the ring */
@@ -852,9 +858,9 @@
     /* Fix p2m entry if the page was not dropped */
     if ( !(rsp.flags & MEM_EVENT_FLAG_DROP_PAGE) )
     {
-        mfn = gfn_to_mfn(d, rsp.gfn, &p2mt);
+        mfn = p2m->get_entry(p2m, rsp.gfn, &p2mt, &a, p2m_query, NULL);
         p2m_lock(p2m);
-        set_p2m_entry(p2m, rsp.gfn, mfn, 0, p2m_ram_rw, p2m->default_access);
+        set_p2m_entry(p2m, rsp.gfn, mfn, 0, p2m_ram_rw, a);
         set_gpfn_from_mfn(mfn_x(mfn), rsp.gfn);
         audit_p2m(p2m, 1);
         p2m_unlock(p2m);
