
[PATCH v2 1/5] x86/paging: fold most HAP and shadow final teardown


  • To: "xen-devel@xxxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxxx>
  • From: Jan Beulich <jbeulich@xxxxxxxx>
  • Date: Mon, 9 Jan 2023 14:39:19 +0100
  • Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>, Wei Liu <wl@xxxxxxx>, Roger Pau Monné <roger.pau@xxxxxxxxxx>, George Dunlap <george.dunlap@xxxxxxxxxx>, Tim Deegan <tim@xxxxxxx>
  • Delivery-date: Mon, 09 Jan 2023 13:39:26 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>

HAP does a few things beyond what is common to both modes; those are
left in hap_final_teardown() at least for now. The common operations,
however, are moved to paging_final_teardown(), allowing
shadow_final_teardown() to go away entirely.

While moving (and hence generalizing) the respective SHADOW_PRINTK()
invocations, drop the logging of total_pages from the 2nd instance -
its value is necessarily zero after {hap,shadow}_set_allocation() -
and shorten the messages, in part because PAGING_PRINTK() already
logs __func__.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
---
The remaining parts of hap_final_teardown() could be moved as well, at
the price of a CONFIG_HVM conditional. I wasn't sure whether that would
be deemed reasonable; a rough sketch follows.
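
For illustration only - untested, and using just the identifiers
already visible in the hap/hap.c hunk below (MAX_NESTEDP2M,
d->arch.nested_p2m, p2m_teardown()) - such a fold might add to
paging_final_teardown() something like:

#ifdef CONFIG_HVM
    if ( hap )
    {
        unsigned int i;

        /*
         * What currently remains in hap_final_teardown(): tear down
         * the nested p2ms.
         */
        for ( i = 0; i < MAX_NESTEDP2M; i++ )
            p2m_teardown(d->arch.nested_p2m[i], true, NULL);
    }
#endif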
---
v2: Shorten PAGING_PRINTK() messages. Adjust comments as they are
    moved.

--- a/xen/arch/x86/include/asm/shadow.h
+++ b/xen/arch/x86/include/asm/shadow.h
@@ -78,9 +78,6 @@ int shadow_domctl(struct domain *d,
 void shadow_vcpu_teardown(struct vcpu *v);
 void shadow_teardown(struct domain *d, bool *preempted);
 
-/* Call once all of the references to the domain have gone away */
-void shadow_final_teardown(struct domain *d);
-
 void sh_remove_shadows(struct domain *d, mfn_t gmfn, int fast, int all);
 
 /* Adjust shadows ready for a guest page to change its type. */
--- a/xen/arch/x86/mm/hap/hap.c
+++ b/xen/arch/x86/mm/hap/hap.c
@@ -268,8 +268,8 @@ static void hap_free(struct domain *d, m
 
     /*
      * For dying domains, actually free the memory here. This way less work is
-     * left to hap_final_teardown(), which cannot easily have preemption checks
-     * added.
+     * left to paging_final_teardown(), which cannot easily have preemption
+     * checks added.
      */
     if ( unlikely(d->is_dying) )
     {
@@ -552,18 +552,6 @@ void hap_final_teardown(struct domain *d
     for (i = 0; i < MAX_NESTEDP2M; i++) {
         p2m_teardown(d->arch.nested_p2m[i], true, NULL);
     }
-
-    if ( d->arch.paging.total_pages != 0 )
-        hap_teardown(d, NULL);
-
-    p2m_teardown(p2m_get_hostp2m(d), true, NULL);
-    /* Free any memory that the p2m teardown released */
-    paging_lock(d);
-    hap_set_allocation(d, 0, NULL);
-    ASSERT(d->arch.paging.p2m_pages == 0);
-    ASSERT(d->arch.paging.free_pages == 0);
-    ASSERT(d->arch.paging.total_pages == 0);
-    paging_unlock(d);
 }
 
 void hap_vcpu_teardown(struct vcpu *v)
--- a/xen/arch/x86/mm/paging.c
+++ b/xen/arch/x86/mm/paging.c
@@ -842,10 +842,45 @@ int paging_teardown(struct domain *d)
 /* Call once all of the references to the domain have gone away */
 void paging_final_teardown(struct domain *d)
 {
-    if ( hap_enabled(d) )
+    bool hap = hap_enabled(d);
+
+    PAGING_PRINTK("%pd start: total = %u, free = %u, p2m = %u\n",
+                  d, d->arch.paging.total_pages,
+                  d->arch.paging.free_pages, d->arch.paging.p2m_pages);
+
+    if ( hap )
         hap_final_teardown(d);
+
+    /*
+     * Remove remaining paging memory.  This can be nonzero on certain error
+     * paths.
+     */
+    if ( d->arch.paging.total_pages )
+    {
+        if ( hap )
+            hap_teardown(d, NULL);
+        else
+            shadow_teardown(d, NULL);
+    }
+
+    /* It is now safe to pull down the p2m map. */
+    p2m_teardown(p2m_get_hostp2m(d), true, NULL);
+
+    /* Free any paging memory that the p2m teardown released. */
+    paging_lock(d);
+
+    if ( hap )
+        hap_set_allocation(d, 0, NULL);
     else
-        shadow_final_teardown(d);
+        shadow_set_allocation(d, 0, NULL);
+
+    PAGING_PRINTK("%pd done: free = %u, p2m = %u\n",
+                  d, d->arch.paging.free_pages, d->arch.paging.p2m_pages);
+    ASSERT(!d->arch.paging.p2m_pages);
+    ASSERT(!d->arch.paging.free_pages);
+    ASSERT(!d->arch.paging.total_pages);
+
+    paging_unlock(d);
 
     p2m_final_teardown(d);
 }
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -1194,7 +1194,7 @@ void shadow_free(struct domain *d, mfn_t
 
         /*
          * For dying domains, actually free the memory here. This way less
-         * work is left to shadow_final_teardown(), which cannot easily have
+         * work is left to paging_final_teardown(), which cannot easily have
          * preemption checks added.
          */
         if ( unlikely(dying) )
@@ -2898,35 +2898,6 @@ out:
     }
 }
 
-void shadow_final_teardown(struct domain *d)
-/* Called by arch_domain_destroy(), when it's safe to pull down the p2m map. */
-{
-    SHADOW_PRINTK("dom %u final teardown starts."
-                   "  Shadow pages total = %u, free = %u, p2m=%u\n",
-                   d->domain_id, d->arch.paging.total_pages,
-                   d->arch.paging.free_pages, d->arch.paging.p2m_pages);
-
-    /* Double-check that the domain didn't have any shadow memory.
-     * It is possible for a domain that never got domain_kill()ed
-     * to get here with its shadow allocation intact. */
-    if ( d->arch.paging.total_pages != 0 )
-        shadow_teardown(d, NULL);
-
-    /* It is now safe to pull down the p2m map. */
-    p2m_teardown(p2m_get_hostp2m(d), true, NULL);
-    /* Free any shadow memory that the p2m teardown released */
-    paging_lock(d);
-    shadow_set_allocation(d, 0, NULL);
-    SHADOW_PRINTK("dom %u final teardown done."
-                   "  Shadow pages total = %u, free = %u, p2m=%u\n",
-                   d->domain_id, d->arch.paging.total_pages,
-                   d->arch.paging.free_pages, d->arch.paging.p2m_pages);
-    ASSERT(d->arch.paging.p2m_pages == 0);
-    ASSERT(d->arch.paging.free_pages == 0);
-    ASSERT(d->arch.paging.total_pages == 0);
-    paging_unlock(d);
-}
-
 static int shadow_one_bit_enable(struct domain *d, u32 mode)
 /* Turn on a single shadow mode feature */
 {




 

