[Xen-changelog] [xen master] x86/mm: drop p2mt parameter from map_domain_gfn()



commit b2426381b1a768e783cba6d6f672d979dd61913c
Author:     Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Wed Feb 26 17:35:07 2020 +0100
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Wed Feb 26 17:35:07 2020 +0100

    x86/mm: drop p2mt parameter from map_domain_gfn()
    
    No caller actually consumes it.
    
    Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
    Acked-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
 xen/arch/x86/hvm/dom0_build.c    | 3 +--
 xen/arch/x86/mm/guest_walk.c     | 4 ----
 xen/arch/x86/mm/hap/nested_ept.c | 3 +--
 xen/arch/x86/mm/p2m.c            | 9 +++++----
 xen/include/asm-x86/p2m.h        | 2 +-
 5 files changed, 8 insertions(+), 13 deletions(-)
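
For illustration only, a minimal sketch of what a caller looks like with the
updated signature. The wrapper function, its gfn argument, and the error
handling below are assumptions for the example, not code from this patch;
only map_domain_gfn(), p2m_get_hostp2m(), and the PFEC_* constants come from
the tree itself.

/* Illustrative sketch, not part of this patch. Assumes the usual Xen
 * headers (xen/mm.h, asm/p2m.h) and a caller that already has a
 * struct domain *d plus a guest frame number to inspect. */
static void *example_map_one_gfn(struct domain *d, gfn_t gfn, mfn_t *mfn)
{
    uint32_t pfec = 0;
    void *ptr;

    /* After this change no p2m_type_t out-parameter is passed; the checks
     * against paging/shared entries happen inside map_domain_gfn(). */
    ptr = map_domain_gfn(p2m_get_hostp2m(d), gfn, mfn, 0, &pfec);
    if ( ptr == NULL )
    {
        /* pfec reports why the mapping failed (e.g. PFEC_page_paged,
         * PFEC_page_shared, or synthetic reserved-bit faults). */
        return NULL;
    }

    /*
     * On success the caller remains responsible for the usual cleanup once
     * finished, mirroring the existing callers touched by this patch
     * (placement here is hypothetical):
     *   unmap_domain_page(ptr);
     *   put_page(mfn_to_page(*mfn));
     */
    return ptr;
}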

diff --git a/xen/arch/x86/hvm/dom0_build.c b/xen/arch/x86/hvm/dom0_build.c
index 85326ff63e..eded87eaf5 100644
--- a/xen/arch/x86/hvm/dom0_build.c
+++ b/xen/arch/x86/hvm/dom0_build.c
@@ -278,7 +278,6 @@ static int __init pvh_add_mem_range(struct domain *d, uint64_t s, uint64_t e,
 
 static int __init pvh_setup_vmx_realmode_helpers(struct domain *d)
 {
-    p2m_type_t p2mt;
     uint32_t rc, *ident_pt;
     mfn_t mfn;
     paddr_t gaddr;
@@ -317,7 +316,7 @@ static int __init pvh_setup_vmx_realmode_helpers(struct domain *d)
      * superpages.
      */
     ident_pt = map_domain_gfn(p2m_get_hostp2m(d), _gfn(PFN_DOWN(gaddr)),
-                              &mfn, &p2mt, 0, &rc);
+                              &mfn, 0, &rc);
     if ( ident_pt == NULL )
     {
         printk("Unable to map identity page tables\n");
diff --git a/xen/arch/x86/mm/guest_walk.c b/xen/arch/x86/mm/guest_walk.c
index f67aeda3d0..ab7021a1ce 100644
--- a/xen/arch/x86/mm/guest_walk.c
+++ b/xen/arch/x86/mm/guest_walk.c
@@ -85,7 +85,6 @@ guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m,
                   uint32_t walk, mfn_t top_mfn, void *top_map)
 {
     struct domain *d = v->domain;
-    p2m_type_t p2mt;
     guest_l1e_t *l1p = NULL;
     guest_l2e_t *l2p = NULL;
 #if GUEST_PAGING_LEVELS >= 4 /* 64-bit only... */
@@ -153,7 +152,6 @@ guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m,
     l3p = map_domain_gfn(p2m,
                          guest_l4e_get_gfn(gw->l4e),
                          &gw->l3mfn,
-                         &p2mt,
                          qt,
                          &rc);
     if ( l3p == NULL )
@@ -232,7 +230,6 @@ guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m,
     l2p = map_domain_gfn(p2m,
                          guest_l3e_get_gfn(gw->l3e),
                          &gw->l2mfn,
-                         &p2mt,
                          qt,
                          &rc);
     if ( l2p == NULL )
@@ -326,7 +323,6 @@ guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m,
     l1p = map_domain_gfn(p2m,
                          guest_l2e_get_gfn(gw->l2e),
                          &gw->l1mfn,
-                         &p2mt,
                          qt,
                          &rc);
     if ( l1p == NULL )
diff --git a/xen/arch/x86/mm/hap/nested_ept.c b/xen/arch/x86/mm/hap/nested_ept.c
index 5424595e60..1cb7fefc37 100644
--- a/xen/arch/x86/mm/hap/nested_ept.c
+++ b/xen/arch/x86/mm/hap/nested_ept.c
@@ -151,7 +151,6 @@ static uint32_t
 nept_walk_tables(struct vcpu *v, unsigned long l2ga, ept_walk_t *gw)
 {
     int lvl;
-    p2m_type_t p2mt;
     uint32_t rc = 0, ret = 0, gflags;
     struct domain *d = v->domain;
     struct p2m_domain *p2m = d->arch.p2m;
@@ -163,7 +162,7 @@ nept_walk_tables(struct vcpu *v, unsigned long l2ga, ept_walk_t *gw)
 
     for (lvl = 4; lvl > 0; lvl--)
     {
-        lxp = map_domain_gfn(p2m, base_gfn, &lxmfn, &p2mt, P2M_ALLOC, &rc);
+        lxp = map_domain_gfn(p2m, base_gfn, &lxmfn, P2M_ALLOC, &rc);
         if ( !lxp )
             goto map_err;
         gw->lxe[lvl] = lxp[ept_lvl_table_offset(l2ga, lvl)];
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index c5f428d67c..3719deae77 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -2199,8 +2199,9 @@ unsigned long paging_gva_to_gfn(struct vcpu *v,
  * synthetic/structure PFEC_* bits.
  */
 void *map_domain_gfn(struct p2m_domain *p2m, gfn_t gfn, mfn_t *mfn,
-                     p2m_type_t *p2mt, p2m_query_t q, uint32_t *pfec)
+                     p2m_query_t q, uint32_t *pfec)
 {
+    p2m_type_t p2mt;
     struct page_info *page;
 
     if ( !gfn_valid(p2m->domain, gfn) )
@@ -2210,8 +2211,8 @@ void *map_domain_gfn(struct p2m_domain *p2m, gfn_t gfn, mfn_t *mfn,
     }
 
     /* Translate the gfn, unsharing if shared. */
-    page = p2m_get_page_from_gfn(p2m, gfn, p2mt, NULL, q);
-    if ( p2m_is_paging(*p2mt) )
+    page = p2m_get_page_from_gfn(p2m, gfn, &p2mt, NULL, q);
+    if ( p2m_is_paging(p2mt) )
     {
         ASSERT(p2m_is_hostp2m(p2m));
         if ( page )
@@ -2220,7 +2221,7 @@ void *map_domain_gfn(struct p2m_domain *p2m, gfn_t gfn, mfn_t *mfn,
         *pfec = PFEC_page_paged;
         return NULL;
     }
-    if ( p2m_is_shared(*p2mt) )
+    if ( p2m_is_shared(p2mt) )
     {
         if ( page )
             put_page(page);
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index 928a7c627a..0cf531abb7 100644
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -762,7 +762,7 @@ int __must_check p2m_set_entry(struct p2m_domain *p2m, gfn_t gfn, mfn_t mfn,
 extern void p2m_pt_init(struct p2m_domain *p2m);
 
 void *map_domain_gfn(struct p2m_domain *p2m, gfn_t gfn, mfn_t *mfn,
-                     p2m_type_t *p2mt, p2m_query_t q, uint32_t *pfec);
+                     p2m_query_t q, uint32_t *pfec);
 
 /* Debugging and auditing of the P2M code? */
 #ifndef NDEBUG
--
generated by git-patchbot for /home/xen/git/xen.git#master
