[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[PATCH v5 09/10] x86: introduce GADDR based secondary time area registration alternative


  • To: xen-devel@xxxxxxxxxxxxxxxxxxxx, henry.wang@xxxxxxx
  • From: Roger Pau Monne <roger.pau@xxxxxxxxxx>
  • Date: Mon, 2 Oct 2023 17:11:26 +0200
  • Cc: Jan Beulich <jbeulich@xxxxxxxx>, Andrew Cooper <andrew.cooper3@xxxxxxxxxx>, Roger Pau Monné <roger.pau@xxxxxxxxxx>, Wei Liu <wl@xxxxxxx>, George Dunlap <george.dunlap@xxxxxxxxxx>, Julien Grall <julien@xxxxxxx>, Stefano Stabellini <sstabellini@xxxxxxxxxx>
  • Delivery-date: Mon, 02 Oct 2023 15:27:02 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>

From: Jan Beulich <jbeulich@xxxxxxxx>

The registration by virtual/linear address has downsides: the access is
expensive for HVM/PVH domains. Furthermore, for 64-bit PV domains the area
is inaccessible (and hence cannot be updated by Xen) when in guest-user
mode.

Introduce a new vCPU operation that allows registering the secondary time
area by guest-physical address.
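
For illustration only (not part of the patch), a guest might use the new
operation roughly as below, falling back to the existing linear-address
registration on hypervisors that don't know it yet. HYPERVISOR_vcpu_op()
and virt_to_phys() stand in for whatever hypercall wrapper and
guest-virtual-to-guest-physical translation the guest environment
provides; only the operation, structure, and field names come from the
public interface.

/* Minimal guest-side sketch; the wrappers used are placeholders. */
static struct vcpu_time_info time_area;   /* per-vCPU, ordinary guest RAM */

static int register_time_area(unsigned int vcpu)
{
    struct vcpu_register_time_memory_area area = {
        /* .addr.p carries a guest-physical address, not a linear one. */
        .addr.p = virt_to_phys(&time_area),
    };
    int rc = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_time_phys_area,
                                vcpu, &area);

    if ( rc == -ENOSYS )
    {
        /* Older hypervisor: fall back to registration by linear address. */
        set_xen_guest_handle(area.addr.v, &time_area);
        rc = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_time_memory_area,
                                vcpu, &area);
    }

    return rc;
}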

An at least theoretical downside of using physically registered areas is
that a PV guest then won't see dirty (and perhaps also accessed) bits set
in its respective page table entries.
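
Regardless of how the area is registered, guests read it with the usual
version-based retry loop; the hypervisor-side version handling added below
(note the "Compensate for version_update_end()" write) is there to keep
that protocol intact. A minimal reader sketch, with rmb() standing in for
whatever read barrier the guest environment provides:

static void read_time_info(const volatile struct vcpu_time_info *t,
                           struct vcpu_time_info *out)
{
    uint32_t version;

    do {
        version = t->version;
        rmb();                     /* read version before the payload */
        out->tsc_timestamp     = t->tsc_timestamp;
        out->system_time       = t->system_time;
        out->tsc_to_system_mul = t->tsc_to_system_mul;
        out->tsc_shift         = t->tsc_shift;
        rmb();                     /* read payload before the re-check */
        /* Odd: update in progress; changed: a full update slipped in. */
    } while ( (version & 1) || version != t->version );

    out->version = version;
}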

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
Reviewed-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
---
 xen/arch/x86/domain.c             | 28 ++++++++++++++++++++++++++++
 xen/arch/x86/include/asm/domain.h |  2 ++
 xen/arch/x86/time.c               | 10 ++++++++++
 xen/arch/x86/x86_64/domain.c      |  1 +
 xen/include/public/vcpu.h         |  4 ++++
 5 files changed, 45 insertions(+)

diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 9d352defa25e..8e0af2278104 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -1529,6 +1529,15 @@ int arch_vcpu_reset(struct vcpu *v)
     return 0;
 }
 
+static void cf_check
+time_area_populate(void *map, struct vcpu *v)
+{
+    if ( is_pv_vcpu(v) )
+        v->arch.pv.pending_system_time.version = 0;
+
+    force_update_secondary_system_time(v, map);
+}
+
 long do_vcpu_op(int cmd, unsigned int vcpuid, XEN_GUEST_HANDLE_PARAM(void) arg)
 {
     long rc = 0;
@@ -1567,6 +1576,25 @@ long do_vcpu_op(int cmd, unsigned int vcpuid, XEN_GUEST_HANDLE_PARAM(void) arg)
         break;
     }
 
+    case VCPUOP_register_vcpu_time_phys_area:
+    {
+        struct vcpu_register_time_memory_area area;
+
+        rc = -EFAULT;
+        if ( copy_from_guest(&area.addr.p, arg, 1) )
+            break;
+
+        rc = map_guest_area(v, area.addr.p,
+                            sizeof(vcpu_time_info_t),
+                            &v->arch.time_guest_area,
+                            time_area_populate);
+        if ( rc == -ERESTART )
+            rc = hypercall_create_continuation(__HYPERVISOR_vcpu_op, "iih",
+                                               cmd, vcpuid, arg);
+
+        break;
+    }
+
     case VCPUOP_get_physid:
     {
         struct vcpu_get_physid cpu_id;
diff --git a/xen/arch/x86/include/asm/domain.h b/xen/arch/x86/include/asm/domain.h
index e0bd28e424e0..619e667938ed 100644
--- a/xen/arch/x86/include/asm/domain.h
+++ b/xen/arch/x86/include/asm/domain.h
@@ -692,6 +692,8 @@ void domain_cpu_policy_changed(struct domain *d);
 
 bool update_secondary_system_time(struct vcpu *,
                                   struct vcpu_time_info *);
+void force_update_secondary_system_time(struct vcpu *,
+                                        struct vcpu_time_info *);
 
 void vcpu_show_registers(const struct vcpu *);
 
diff --git a/xen/arch/x86/time.c b/xen/arch/x86/time.c
index 332d2d79aeae..73df1639a301 100644
--- a/xen/arch/x86/time.c
+++ b/xen/arch/x86/time.c
@@ -1628,6 +1628,16 @@ void force_update_vcpu_system_time(struct vcpu *v)
     __update_vcpu_system_time(v, 1);
 }
 
+void force_update_secondary_system_time(struct vcpu *v,
+                                        struct vcpu_time_info *map)
+{
+    struct vcpu_time_info u;
+
+    collect_time_info(v, &u);
+    u.version = -1; /* Compensate for version_update_end(). */
+    write_time_guest_area(map, &u);
+}
+
 static void update_domain_rtc(void)
 {
     struct domain *d;
diff --git a/xen/arch/x86/x86_64/domain.c b/xen/arch/x86/x86_64/domain.c
index 494b0b54e64e..a02d4f569ee5 100644
--- a/xen/arch/x86/x86_64/domain.c
+++ b/xen/arch/x86/x86_64/domain.c
@@ -115,6 +115,7 @@ compat_vcpu_op(int cmd, unsigned int vcpuid, XEN_GUEST_HANDLE_PARAM(void) arg)
 
     case VCPUOP_send_nmi:
     case VCPUOP_get_physid:
+    case VCPUOP_register_vcpu_time_phys_area:
         rc = do_vcpu_op(cmd, vcpuid, arg);
         break;
 
diff --git a/xen/include/public/vcpu.h b/xen/include/public/vcpu.h
index 9dac0f9748ca..8fb0bd1b6c03 100644
--- a/xen/include/public/vcpu.h
+++ b/xen/include/public/vcpu.h
@@ -209,6 +209,9 @@ DEFINE_XEN_GUEST_HANDLE(vcpu_get_physid_t);
  * segment limit).  It can then apply the normal algorithm to compute
  * system time from the tsc.
  *
+ * New code wants to prefer VCPUOP_register_vcpu_time_phys_area, and only
+ * fall back to the operation here for backwards compatibility.
+ *
  * @extra_arg == pointer to vcpu_register_time_info_memory_area structure.
  */
 #define VCPUOP_register_vcpu_time_memory_area   13
@@ -235,6 +238,7 @@ DEFINE_XEN_GUEST_HANDLE(vcpu_register_time_memory_area_t);
  * VMASST_TYPE_runstate_update_flag engaged by the domain.
  */
 #define VCPUOP_register_runstate_phys_area      14
+#define VCPUOP_register_vcpu_time_phys_area     15
 
 #endif /* __XEN_PUBLIC_VCPU_H__ */
 
-- 
2.42.0
