
[PATCH v2 09/14] x86/P2M: derive HVM-only variant from __get_gfn_type_access()


  • To: "xen-devel@xxxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxxx>
  • From: Jan Beulich <jbeulich@xxxxxxxx>
  • Date: Wed, 23 Feb 2022 17:03:22 +0100
  • Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>, Wei Liu <wl@xxxxxxx>, Roger Pau Monné <roger.pau@xxxxxxxxxx>, George Dunlap <george.dunlap@xxxxxxxxxx>, Tamas K Lengyel <tamas@xxxxxxxxxxxxx>
  • Delivery-date: Wed, 23 Feb 2022 16:03:30 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>

Introduce an inline wrapper dealing with the non-translated-domain case,
while stripping that logic from the main function, which gets renamed to
p2m_get_gfn_type_access(). HVM-only callers can then directly use the
main function.

Along with renaming the main function, also make its GFN parameter, as well
as that of the new inline helper, type-safe (gfn_t rather than unsigned long).
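
Purely for illustration (not part of the patch): with the signatures
introduced here, a hypothetical HVM-only code path, which knows its domain
to be translated, would query the p2m along these lines

    p2m_type_t t;
    p2m_access_t a;

    /* Hypothetical HVM-only caller; p2m and gfn (a gfn_t) come from the
     * surrounding code. */
    mfn_t mfn = p2m_get_gfn_type_access(p2m, gfn, &t, &a, 0, NULL, 0);

while callers which may also encounter non-translated (PV) domains keep
going through the inline _get_gfn_type_access() wrapper, which for such
domains claims p2m_ram_rw and returns the identity MFN.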

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
Reviewed-by: George Dunlap <george.dunlap@xxxxxxxxxx>

--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -1716,7 +1716,7 @@ static void svm_do_nested_pgfault(struct
         } _d;
 
         p2m = p2m_get_p2m(v);
-        mfn = __get_gfn_type_access(p2m, gfn, &p2mt, &p2ma, 0, NULL, 0);
+        mfn = p2m_get_gfn_type_access(p2m, _gfn(gfn), &p2mt, &p2ma, 0, NULL, 0);
 
         _d.gpa = gpa;
         _d.qualification = 0;
@@ -1741,7 +1741,7 @@ static void svm_do_nested_pgfault(struct
     if ( p2m == NULL )
     {
         p2m = p2m_get_p2m(v);
-        mfn = __get_gfn_type_access(p2m, gfn, &p2mt, &p2ma, 0, NULL, 0);
+        mfn = p2m_get_gfn_type_access(p2m, _gfn(gfn), &p2mt, &p2ma, 0, NULL, 0);
     }
     gdprintk(XENLOG_ERR,
          "SVM violation gpa %#"PRIpaddr", mfn %#lx, type %i\n",
--- a/xen/arch/x86/include/asm/p2m.h
+++ b/xen/arch/x86/include/asm/p2m.h
@@ -459,10 +459,27 @@ void p2m_unlock_and_tlb_flush(struct p2m
  * After calling any of the variants below, caller needs to use
  * put_gfn. ****/
 
-mfn_t __nonnull(3, 4) __get_gfn_type_access(
-    struct p2m_domain *p2m, unsigned long gfn, p2m_type_t *t,
+mfn_t __nonnull(3, 4) p2m_get_gfn_type_access(
+    struct p2m_domain *p2m, gfn_t gfn, p2m_type_t *t,
     p2m_access_t *a, p2m_query_t q, unsigned int *page_order, bool_t locked);
 
+static inline mfn_t __nonnull(3, 4) _get_gfn_type_access(
+    struct p2m_domain *p2m, gfn_t gfn, p2m_type_t *t,
+    p2m_access_t *a, p2m_query_t q, unsigned int *page_order, bool_t locked)
+{
+    if ( !p2m || !paging_mode_translate(p2m->domain) )
+    {
+        /*
+         * Not necessarily true, but for non-translated guests we claim
+         * it's the most generic kind of memory.
+         */
+        *t = p2m_ram_rw;
+        return _mfn(gfn_x(gfn));
+    }
+
+    return p2m_get_gfn_type_access(p2m, gfn, t, a, q, page_order, locked);
+}
+
 /* Read a particular P2M table, mapping pages as we go.  Most callers
  * should _not_ call this directly; use the other get_gfn* functions
  * below unless you know you want to walk a p2m that isn't a domain's
@@ -474,7 +491,7 @@ static inline mfn_t __nonnull(3, 4) get_
     struct p2m_domain *p2m, unsigned long gfn, p2m_type_t *t,
     p2m_access_t *a, p2m_query_t q, unsigned int *page_order)
 {
-    return __get_gfn_type_access(p2m, gfn, t, a, q, page_order, true);
+    return _get_gfn_type_access(p2m, _gfn(gfn), t, a, q, page_order, true);
 }
 
 /* General conversion function from gfn to mfn */
@@ -515,7 +532,8 @@ static inline mfn_t get_gfn_query_unlock
                                            p2m_type_t *t)
 {
     p2m_access_t a;
-    return __get_gfn_type_access(p2m_get_hostp2m(d), gfn, t, &a, 0, NULL, 0);
+    return _get_gfn_type_access(p2m_get_hostp2m(d), _gfn(gfn), t, &a, 0,
+                                NULL, 0);
 }
 
 /* Atomically look up a GFN and take a reference count on the backing page.
--- a/xen/arch/x86/mm/mem_access.c
+++ b/xen/arch/x86/mm/mem_access.c
@@ -299,8 +299,9 @@ static int set_mem_access(struct domain
     {
         p2m_access_t _a;
         p2m_type_t t;
-        mfn_t mfn = __get_gfn_type_access(p2m, gfn_x(gfn), &t, &_a,
-                                          P2M_ALLOC, NULL, false);
+        mfn_t mfn = p2m_get_gfn_type_access(p2m, gfn, &t, &_a,
+                                            P2M_ALLOC, NULL, false);
+
         rc = p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, t, a, -1);
     }
 
--- a/xen/arch/x86/mm/mem_sharing.c
+++ b/xen/arch/x86/mm/mem_sharing.c
@@ -478,12 +478,12 @@ do {
 #undef assign_pointers
 
     /* Now do the gets. */
-    *first_mfn  = __get_gfn_type_access(p2m_get_hostp2m(rval->first_domain),
-                                        gfn_x(rval->first_gfn), first_t,
-                                        first_a, q, NULL, lock);
-    *second_mfn = __get_gfn_type_access(p2m_get_hostp2m(rval->second_domain),
-                                        gfn_x(rval->second_gfn), second_t,
-                                        second_a, q, NULL, lock);
+    *first_mfn  = p2m_get_gfn_type_access(p2m_get_hostp2m(rval->first_domain),
+                                          rval->first_gfn, first_t,
+                                          first_a, q, NULL, lock);
+    *second_mfn = p2m_get_gfn_type_access(p2m_get_hostp2m(rval->second_domain),
+                                          rval->second_gfn, second_t,
+                                          second_a, q, NULL, lock);
 }
 
 static void put_two_gfns(const struct two_gfns *arg)
@@ -936,8 +936,8 @@ static int nominate_page(struct domain *
             if ( !ap2m )
                 continue;
 
-            amfn = __get_gfn_type_access(ap2m, gfn_x(gfn), &ap2mt, &ap2ma,
-                                         0, NULL, false);
+            amfn = p2m_get_gfn_type_access(ap2m, gfn, &ap2mt, &ap2ma,
+                                           0, NULL, false);
             if ( mfn_valid(amfn) && (!mfn_eq(amfn, mfn) || ap2ma != p2ma) )
             {
                 altp2m_list_unlock(d);
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -286,25 +286,13 @@ void p2m_unlock_and_tlb_flush(struct p2m
         mm_write_unlock(&p2m->lock);
 }
 
-mfn_t __get_gfn_type_access(struct p2m_domain *p2m, unsigned long gfn_l,
-                    p2m_type_t *t, p2m_access_t *a, p2m_query_t q,
-                    unsigned int *page_order, bool_t locked)
-{
 #ifdef CONFIG_HVM
-    mfn_t mfn;
-    gfn_t gfn = _gfn(gfn_l);
 
-    if ( !p2m || !paging_mode_translate(p2m->domain) )
-    {
-#endif
-        /*
-         * Not necessarily true, but for non-translated guests we claim
-         * it's the most generic kind of memory.
-         */
-        *t = p2m_ram_rw;
-        return _mfn(gfn_l);
-#ifdef CONFIG_HVM
-    }
+mfn_t p2m_get_gfn_type_access(struct p2m_domain *p2m, gfn_t gfn,
+                              p2m_type_t *t, p2m_access_t *a, p2m_query_t q,
+                              unsigned int *page_order, bool_t locked)
+{
+    mfn_t mfn;
 
     /* Unshare makes no sense without populate. */
     if ( q & P2M_UNSHARE )
@@ -329,8 +317,8 @@ mfn_t __get_gfn_type_access(struct p2m_d
          * Try to unshare. If we fail, communicate ENOMEM without
          * sleeping.
          */
-        if ( mem_sharing_unshare_page(p2m->domain, gfn_l) < 0 )
-            mem_sharing_notify_enomem(p2m->domain, gfn_l, false);
+        if ( mem_sharing_unshare_page(p2m->domain, gfn_x(gfn)) < 0 )
+            mem_sharing_notify_enomem(p2m->domain, gfn_x(gfn), false);
         mfn = p2m->get_entry(p2m, gfn, t, a, q, page_order, NULL);
     }
 
@@ -343,9 +331,10 @@ mfn_t __get_gfn_type_access(struct p2m_d
     }
 
     return mfn;
-#endif
 }
 
+#endif /* CONFIG_HVM */
+
 void __put_gfn(struct p2m_domain *p2m, unsigned long gfn)
 {
     if ( !p2m || !paging_mode_translate(p2m->domain) )
@@ -377,7 +366,7 @@ struct page_info *p2m_get_page_from_gfn(
     {
         /* Fast path: look up and get out */
         p2m_read_lock(p2m);
-        mfn = __get_gfn_type_access(p2m, gfn_x(gfn), t, a, 0, NULL, 0);
+        mfn = p2m_get_gfn_type_access(p2m, gfn, t, a, 0, NULL, 0);
         if ( p2m_is_any_ram(*t) && mfn_valid(mfn)
              && !((q & P2M_UNSHARE) && p2m_is_shared(*t)) )
         {
@@ -1775,8 +1764,8 @@ int altp2m_get_effective_entry(struct p2
         unsigned int page_order;
         int rc;
 
-        *mfn = __get_gfn_type_access(hp2m, gfn_x(gfn), t, a,
-                                     P2M_ALLOC | P2M_UNSHARE, &page_order, 0);
+        *mfn = p2m_get_gfn_type_access(hp2m, gfn, t, a, P2M_ALLOC | P2M_UNSHARE,
+                                       &page_order, 0);
 
         rc = -ESRCH;
         if ( !mfn_valid(*mfn) || *t != p2m_ram_rw )

