
[XEN v4 03/11] xen/Arm: vreg: Support vreg_reg64_* helpers on AArch32


  • To: <xen-devel@xxxxxxxxxxxxxxxxxxxx>
  • From: Ayan Kumar Halder <ayan.kumar.halder@xxxxxxx>
  • Date: Mon, 28 Nov 2022 15:56:41 +0000
  • Cc: <sstabellini@xxxxxxxxxx>, <stefanos@xxxxxxxxxx>, <julien@xxxxxxx>, <Volodymyr_Babchuk@xxxxxxxx>, <bertrand.marquis@xxxxxxx>, <michal.orzel@xxxxxxx>, <jgrall@xxxxxxxxxx>, <burzalodowa@xxxxxxxxx>, "Ayan Kumar Halder" <ayan.kumar.halder@xxxxxxx>
  • Delivery-date: Mon, 28 Nov 2022 15:57:30 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>

In some situations (e.g. GICR_TYPER), the hypervisor may need to emulate
64-bit registers on AArch32. In such situations, the hypervisor may
need to read/modify the lower or upper 32 bits of the 64-bit register.
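(Not part of the patch, just an illustration: a minimal sketch of how a read
handler for such a 64-bit register could use the new helper. The handler name
and the 'typer' value are invented for this example; vreg_reg64_extract() and
mmio_info_t are the real helper/type touched below.)

/* Illustration only: emulate a 32-bit guest read of either half of a
 * 64-bit register. info->gpa selects which word is accessed and
 * info->dabt.size the access width; both are handled inside
 * vreg_reg64_extract().
 */
static int example_read_typer(const mmio_info_t *info, register_t *r,
                              uint64_t typer)
{
    *r = vreg_reg64_extract(typer, info);

    return 1;
}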

In AArch32, 'unsigned long' is 32 bits wide. Thus, we cannot use it for
64-bit registers.

While we could replace 'unsigned long' with 'uint64_t', it is not entirely
clear whether a 32-bit compiler would avoid allocating a register for the
upper 32 bits when only the lower half is needed. Therefore, fold the
vreg_reg_* helpers into the size-specific ones and use the appropriate type
based on the size requested.
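(Again for illustration only, not part of the patch: hand-expanding
VREG_REG_HELPERS(64, 0x7) for the update helper shows the effect of the fold.
Every local uses uint64_t or unsigned int, so the helper no longer depends on
the width of 'unsigned long'.)

/* Rough expansion of vreg_reg64_update() after this patch. */
static inline void vreg_reg64_update(uint64_t *reg, register_t val,
                                     const mmio_info_t *info)
{
    unsigned int offset = info->gpa & 0x7;
    uint64_t mask = VREG_REG_MASK(info->dabt.size);
    unsigned int shift = offset * 8;

    *reg &= ~(mask << shift);
    *reg |= ((uint64_t)val & mask) << shift;
}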

Signed-off-by: Ayan Kumar Halder <ayan.kumar.halder@xxxxxxx>
Reviewed-by: Michal Orzel <michal.orzel@xxxxxxx>
Acked-by: Julien Grall <julien@xxxxxxx>
---

Changes from -

v1 - 1. Removed vreg_reg_extract(), vreg_reg_update(), vreg_reg_setbits() and
vreg_reg_clearbits(). Moved the implementation to vreg_reg##sz##_*.
'mask' and 'val' now use uint##sz##_t.

v2 - 1. Use 'unsigned int' for 'shift' variable.
2. Updated the commit message.

v3 - 1. No changes. Added Rb and Ack.

 xen/arch/arm/include/asm/vreg.h | 86 ++++++++-------------------------
 1 file changed, 19 insertions(+), 67 deletions(-)

diff --git a/xen/arch/arm/include/asm/vreg.h b/xen/arch/arm/include/asm/vreg.h
index f26a70d024..d92450017b 100644
--- a/xen/arch/arm/include/asm/vreg.h
+++ b/xen/arch/arm/include/asm/vreg.h
@@ -89,106 +89,58 @@ static inline bool vreg_emulate_sysreg(struct cpu_user_regs *regs, union hsr hsr
  * The check on the size supported by the register has to be done by
  * the caller of vreg_regN_*.
  *
- * vreg_reg_* should never be called directly. Instead use the vreg_regN_*
- * according to size of the emulated register
- *
  * Note that the alignment fault will always be taken in the guest
  * (see B3.12.7 DDI0406.b).
  */
-static inline register_t vreg_reg_extract(unsigned long reg,
-                                          unsigned int offset,
-                                          enum dabt_size size)
-{
-    reg >>= 8 * offset;
-    reg &= VREG_REG_MASK(size);
-
-    return reg;
-}
-
-static inline void vreg_reg_update(unsigned long *reg, register_t val,
-                                   unsigned int offset,
-                                   enum dabt_size size)
-{
-    unsigned long mask = VREG_REG_MASK(size);
-    int shift = offset * 8;
-
-    *reg &= ~(mask << shift);
-    *reg |= ((unsigned long)val & mask) << shift;
-}
-
-static inline void vreg_reg_setbits(unsigned long *reg, register_t bits,
-                                    unsigned int offset,
-                                    enum dabt_size size)
-{
-    unsigned long mask = VREG_REG_MASK(size);
-    int shift = offset * 8;
-
-    *reg |= ((unsigned long)bits & mask) << shift;
-}
-
-static inline void vreg_reg_clearbits(unsigned long *reg, register_t bits,
-                                      unsigned int offset,
-                                      enum dabt_size size)
-{
-    unsigned long mask = VREG_REG_MASK(size);
-    int shift = offset * 8;
-
-    *reg &= ~(((unsigned long)bits & mask) << shift);
-}
 
 /* N-bit register helpers */
 #define VREG_REG_HELPERS(sz, offmask)                                   \
 static inline register_t vreg_reg##sz##_extract(uint##sz##_t reg,       \
                                                 const mmio_info_t *info)\
 {                                                                       \
-    return vreg_reg_extract(reg, info->gpa & (offmask),                 \
-                            info->dabt.size);                           \
+    unsigned int offset = info->gpa & (offmask);                        \
+                                                                        \
+    reg >>= 8 * offset;                                                 \
+    reg &= VREG_REG_MASK(info->dabt.size);                              \
+                                                                        \
+    return reg;                                                         \
 }                                                                       \
                                                                         \
 static inline void vreg_reg##sz##_update(uint##sz##_t *reg,             \
                                          register_t val,                \
                                          const mmio_info_t *info)       \
 {                                                                       \
-    unsigned long tmp = *reg;                                           \
+    unsigned int offset = info->gpa & (offmask);                        \
+    uint##sz##_t mask = VREG_REG_MASK(info->dabt.size);                 \
+    unsigned int shift = offset * 8;                                    \
                                                                         \
-    vreg_reg_update(&tmp, val, info->gpa & (offmask),                   \
-                    info->dabt.size);                                   \
-                                                                        \
-    *reg = tmp;                                                         \
+    *reg &= ~(mask << shift);                                           \
+    *reg |= ((uint##sz##_t)val & mask) << shift;                        \
 }                                                                       \
                                                                         \
 static inline void vreg_reg##sz##_setbits(uint##sz##_t *reg,            \
                                           register_t bits,              \
                                           const mmio_info_t *info)      \
 {                                                                       \
-    unsigned long tmp = *reg;                                           \
-                                                                        \
-    vreg_reg_setbits(&tmp, bits, info->gpa & (offmask),                 \
-                     info->dabt.size);                                  \
+    unsigned int offset = info->gpa & (offmask);                        \
+    uint##sz##_t mask = VREG_REG_MASK(info->dabt.size);                 \
+    unsigned int shift = offset * 8;                                    \
                                                                         \
-    *reg = tmp;                                                         \
+    *reg |= ((uint##sz##_t)bits & mask) << shift;                       \
 }                                                                       \
                                                                         \
 static inline void vreg_reg##sz##_clearbits(uint##sz##_t *reg,          \
                                             register_t bits,            \
                                             const mmio_info_t *info)    \
 {                                                                       \
-    unsigned long tmp = *reg;                                           \
-                                                                        \
-    vreg_reg_clearbits(&tmp, bits, info->gpa & (offmask),               \
-                       info->dabt.size);                                \
+    unsigned int offset = info->gpa & (offmask);                        \
+    uint##sz##_t mask = VREG_REG_MASK(info->dabt.size);                 \
+    unsigned int shift = offset * 8;                                    \
                                                                         \
-    *reg = tmp;                                                         \
+    *reg &= ~(((uint##sz##_t)bits & mask) << shift);                    \
 }
 
-/*
- * 64 bits registers are only supported on platform with 64-bit long.
- * This is also allow us to optimize the 32 bit case by using
- * unsigned long rather than uint64_t
- */
-#if BITS_PER_LONG == 64
 VREG_REG_HELPERS(64, 0x7);
-#endif
 VREG_REG_HELPERS(32, 0x3);
 
 #undef VREG_REG_HELPERS
-- 
2.17.1
