
[QEMU][PATCH v3 02/10] hw/i386/xen: rearrange xen_hvm_init_pc


  • To: <qemu-devel@xxxxxxxxxx>
  • From: Vikram Garhwal <vikram.garhwal@xxxxxxx>
  • Date: Wed, 25 Jan 2023 00:43:47 -0800
  • Cc: <xen-devel@xxxxxxxxxxxxxxxxxxxx>, <vikram.garhwal@xxxxxxx>, <stefano.stabellini@xxxxxxx>, <alex.bennee@xxxxxxxxxx>, Stefano Stabellini <sstabellini@xxxxxxxxxx>, Anthony Perard <anthony.perard@xxxxxxxxxx>, "Paul Durrant" <paul@xxxxxxx>, Paolo Bonzini <pbonzini@xxxxxxxxxx>, "Richard Henderson" <richard.henderson@xxxxxxxxxx>, Eduardo Habkost <eduardo@xxxxxxxxxxx>, "Michael S. Tsirkin" <mst@xxxxxxxxxx>, "Marcel Apfelbaum" <marcel.apfelbaum@xxxxxxxxx>
  • Delivery-date: Wed, 25 Jan 2023 08:46:24 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>

In preparation for moving most of the xen-hvm code to an arch-neutral location,
move the non-IOREQ references to:
- xen_get_vmport_regs_pfn
- xen_suspend_notifier
- xen_wakeup_notifier
- xen_ram_init

towards the end of the xen_hvm_init_pc() function.

This keeps the common IOREQ setup in one place; the next patch moves it into a
new function so that it can be shared by both x86 and aarch64 machines (a rough
sketch of the intended split follows).
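
For illustration only, a minimal, self-contained sketch of that split. All names
and types below (XenIOState fields, xen_register_ioreq_sketch,
xen_hvm_init_pc_sketch) are placeholders rather than the real QEMU API; the
actual helper and its signature are only introduced in the next patch.

#include <stdio.h>

/* Stand-in for QEMU's XenIOState: two flags model the two halves of init. */
typedef struct XenIOState {
    int ioreq_ready;     /* common IOREQ setup (shared pages, event channels) */
    int arch_bits_done;  /* x86-only setup (notifiers, vmport, xen_ram_init)  */
} XenIOState;

/* Arch-neutral part: everything IOREQ related stays grouped here. */
static void xen_register_ioreq_sketch(XenIOState *state)
{
    state->ioreq_ready = 1;
}

/*
 * x86 init: run the common helper first, then the calls this patch moved
 * towards the end (suspend/wakeup notifiers, vmport mapping, xen_ram_init).
 */
static void xen_hvm_init_pc_sketch(XenIOState *state)
{
    xen_register_ioreq_sketch(state);
    state->arch_bits_done = 1;
}

int main(void)
{
    XenIOState state = { 0 };
    xen_hvm_init_pc_sketch(&state);
    printf("ioreq=%d arch=%d\n", state.ioreq_ready, state.arch_bits_done);
    return 0;
}

An aarch64 machine init would then call the same common helper and skip the
x86-only tail, which is why grouping the IOREQ code together makes the later
split mechanical.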

Signed-off-by: Vikram Garhwal <vikram.garhwal@xxxxxxx>
Signed-off-by: Stefano Stabellini <stefano.stabellini@xxxxxxx>
Reviewed-by: Paul Durrant <paul@xxxxxxx>
---
 hw/i386/xen/xen-hvm.c | 49 ++++++++++++++++++++++---------------------
 1 file changed, 25 insertions(+), 24 deletions(-)

diff --git a/hw/i386/xen/xen-hvm.c b/hw/i386/xen/xen-hvm.c
index b9a6f7f538..1fba0e0ae1 100644
--- a/hw/i386/xen/xen-hvm.c
+++ b/hw/i386/xen/xen-hvm.c
@@ -1416,12 +1416,6 @@ void xen_hvm_init_pc(PCMachineState *pcms, MemoryRegion **ram_memory)
     state->exit.notify = xen_exit_notifier;
     qemu_add_exit_notifier(&state->exit);
 
-    state->suspend.notify = xen_suspend_notifier;
-    qemu_register_suspend_notifier(&state->suspend);
-
-    state->wakeup.notify = xen_wakeup_notifier;
-    qemu_register_wakeup_notifier(&state->wakeup);
-
     /*
      * Register wake-up support in QMP query-current-machine API
      */
@@ -1432,23 +1426,6 @@ void xen_hvm_init_pc(PCMachineState *pcms, MemoryRegion **ram_memory)
         goto err;
     }
 
-    rc = xen_get_vmport_regs_pfn(xen_xc, xen_domid, &ioreq_pfn);
-    if (!rc) {
-        DPRINTF("shared vmport page at pfn %lx\n", ioreq_pfn);
-        state->shared_vmport_page =
-            xenforeignmemory_map(xen_fmem, xen_domid, PROT_READ|PROT_WRITE,
-                                 1, &ioreq_pfn, NULL);
-        if (state->shared_vmport_page == NULL) {
-            error_report("map shared vmport IO page returned error %d handle=%p",
-                         errno, xen_xc);
-            goto err;
-        }
-    } else if (rc != -ENOSYS) {
-        error_report("get vmport regs pfn returned error %d, rc=%d",
-                     errno, rc);
-        goto err;
-    }
-
     /* Note: cpus is empty at this point in init */
     state->cpu_by_vcpu_id = g_new0(CPUState *, max_cpus);
 
@@ -1486,7 +1463,6 @@ void xen_hvm_init_pc(PCMachineState *pcms, MemoryRegion **ram_memory)
 #else
     xen_map_cache_init(NULL, state);
 #endif
-    xen_ram_init(pcms, ms->ram_size, ram_memory);
 
     qemu_add_vm_change_state_handler(xen_hvm_change_state_handler, state);
 
@@ -1513,6 +1489,31 @@ void xen_hvm_init_pc(PCMachineState *pcms, MemoryRegion **ram_memory)
     QLIST_INIT(&xen_physmap);
     xen_read_physmap(state);
 
+    state->suspend.notify = xen_suspend_notifier;
+    qemu_register_suspend_notifier(&state->suspend);
+
+    state->wakeup.notify = xen_wakeup_notifier;
+    qemu_register_wakeup_notifier(&state->wakeup);
+
+    rc = xen_get_vmport_regs_pfn(xen_xc, xen_domid, &ioreq_pfn);
+    if (!rc) {
+        DPRINTF("shared vmport page at pfn %lx\n", ioreq_pfn);
+        state->shared_vmport_page =
+            xenforeignmemory_map(xen_fmem, xen_domid, PROT_READ|PROT_WRITE,
+                                 1, &ioreq_pfn, NULL);
+        if (state->shared_vmport_page == NULL) {
+            error_report("map shared vmport IO page returned error %d handle=%p",
+                         errno, xen_xc);
+            goto err;
+        }
+    } else if (rc != -ENOSYS) {
+        error_report("get vmport regs pfn returned error %d, rc=%d",
+                     errno, rc);
+        goto err;
+    }
+
+    xen_ram_init(pcms, ms->ram_size, ram_memory);
+
     /* Disable ACPI build because Xen handles it */
     pcms->acpi_build_enabled = false;
 
-- 
2.17.0




 

