[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[xen staging] introduce unaligned.h



commit 7c9f81687ad611515474b1c17afc2f79f19faef5
Author:     Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Mon Jan 18 12:09:13 2021 +0100
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Mon Jan 18 12:09:13 2021 +0100

    introduce unaligned.h
    
    Rather than open-coding commonly used constructs in yet more places when
    pulling in zstd decompression support (and its xxhash prereq), pull out
    the custom bits into a commonly used header (for the hypervisor build;
    the tool stack and stubdom builds of libxenguest will still remain in
    need of similarly taking care of). For now this is limited to x86, where
    custom logic isn't needed (considering this is going to be used in init
    code only, even using alternatives patching to use MOVBE doesn't seem
    worthwhile).
    
    For Arm64 with CONFIG_ACPI=y (due to efi-dom0.c's re-use of xz/crc32.c)
    drop the not really necessary inclusion of xz's private.h.
    
    No change in generated code.
    
    Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
    Acked-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
 xen/common/lz4/defs.h           |  9 +++--
 xen/common/lzo.c                |  7 ++--
 xen/common/unlzo.c              | 19 +++-------
 xen/common/xz/crc32.c           |  2 --
 xen/common/xz/private.h         | 23 ++++--------
 xen/include/asm-x86/unaligned.h |  6 ++++
 xen/include/xen/unaligned.h     | 79 +++++++++++++++++++++++++++++++++++++++++
 7 files changed, 104 insertions(+), 41 deletions(-)

diff --git a/xen/common/lz4/defs.h b/xen/common/lz4/defs.h
index d886a4e122..4fbea2ac3d 100644
--- a/xen/common/lz4/defs.h
+++ b/xen/common/lz4/defs.h
@@ -10,18 +10,21 @@
 
 #ifdef __XEN__
 #include <asm/byteorder.h>
-#endif
+#include <asm/unaligned.h>
+#else
 
-static inline u16 INIT get_unaligned_le16(const void *p)
+static inline u16 get_unaligned_le16(const void *p)
 {
        return le16_to_cpup(p);
 }
 
-static inline u32 INIT get_unaligned_le32(const void *p)
+static inline u32 get_unaligned_le32(const void *p)
 {
        return le32_to_cpup(p);
 }
 
+#endif
+
 /*
  * Detects 64 bits mode
  */
diff --git a/xen/common/lzo.c b/xen/common/lzo.c
index f4c0ad8530..e4841794f4 100644
--- a/xen/common/lzo.c
+++ b/xen/common/lzo.c
@@ -97,13 +97,12 @@
 #ifdef __XEN__
 #include <xen/lib.h>
 #include <asm/byteorder.h>
+#include <asm/unaligned.h>
+#else
+#define get_unaligned_le16(_p) (*(u16 *)(_p))
 #endif
 
 #include <xen/lzo.h>
-#define get_unaligned(_p) (*(_p))
-#define put_unaligned(_val,_p) (*(_p)=_val)
-#define get_unaligned_le16(_p) (*(u16 *)(_p))
-#define get_unaligned_le32(_p) (*(u32 *)(_p))
 
 #include "decompress.h"
 
diff --git a/xen/common/unlzo.c b/xen/common/unlzo.c
index 5ae6cf911e..11f64fcf3b 100644
--- a/xen/common/unlzo.c
+++ b/xen/common/unlzo.c
@@ -34,30 +34,19 @@
 
 #ifdef __XEN__
 #include <asm/byteorder.h>
-#endif
+#include <asm/unaligned.h>
+#else
 
-#if 1 /* ndef CONFIG_??? */
-static inline u16 INIT get_unaligned_be16(void *p)
+static inline u16 get_unaligned_be16(const void *p)
 {
        return be16_to_cpup(p);
 }
 
-static inline u32 INIT get_unaligned_be32(void *p)
+static inline u32 get_unaligned_be32(const void *p)
 {
        return be32_to_cpup(p);
 }
-#else
-#include <asm/unaligned.h>
-
-static inline u16 INIT get_unaligned_be16(void *p)
-{
-       return be16_to_cpu(__get_unaligned(p, 2));
-}
 
-static inline u32 INIT get_unaligned_be32(void *p)
-{
-       return be32_to_cpu(__get_unaligned(p, 4));
-}
 #endif
 
 static const unsigned char lzop_magic[] = {
diff --git a/xen/common/xz/crc32.c b/xen/common/xz/crc32.c
index af08ae2cf6..0708b61638 100644
--- a/xen/common/xz/crc32.c
+++ b/xen/common/xz/crc32.c
@@ -15,8 +15,6 @@
  * but they are bigger and use more memory for the lookup table.
  */
 
-#include "private.h"
-
 XZ_EXTERN uint32_t INITDATA xz_crc32_table[256];
 
 XZ_EXTERN void INIT xz_crc32_init(void)
diff --git a/xen/common/xz/private.h b/xen/common/xz/private.h
index 7ea2489229..511343fcc2 100644
--- a/xen/common/xz/private.h
+++ b/xen/common/xz/private.h
@@ -13,34 +13,23 @@
 #ifdef __XEN__
 #include <xen/kernel.h>
 #include <asm/byteorder.h>
-#endif
-
-#define get_le32(p) le32_to_cpup((const uint32_t *)(p))
+#include <asm/unaligned.h>
+#else
 
-#if 1 /* ndef CONFIG_??? */
-static inline u32 INIT get_unaligned_le32(void *p)
+static inline u32 get_unaligned_le32(const void *p)
 {
        return le32_to_cpup(p);
 }
 
-static inline void INIT put_unaligned_le32(u32 val, void *p)
+static inline void put_unaligned_le32(u32 val, void *p)
 {
        *(__force __le32*)p = cpu_to_le32(val);
 }
-#else
-#include <asm/unaligned.h>
-
-static inline u32 INIT get_unaligned_le32(void *p)
-{
-       return le32_to_cpu(__get_unaligned(p, 4));
-}
 
-static inline void INIT put_unaligned_le32(u32 val, void *p)
-{
-       __put_unaligned(cpu_to_le32(val), p, 4);
-}
 #endif
 
+#define get_le32(p) le32_to_cpup((const uint32_t *)(p))
+
 #define false 0
 #define true 1
 
diff --git a/xen/include/asm-x86/unaligned.h b/xen/include/asm-x86/unaligned.h
new file mode 100644
index 0000000000..6070801d4a
--- /dev/null
+++ b/xen/include/asm-x86/unaligned.h
@@ -0,0 +1,6 @@
+#ifndef __ASM_UNALIGNED_H__
+#define __ASM_UNALIGNED_H__
+
+#include <xen/unaligned.h>
+
+#endif /* __ASM_UNALIGNED_H__ */
diff --git a/xen/include/xen/unaligned.h b/xen/include/xen/unaligned.h
new file mode 100644
index 0000000000..eef7ec73b6
--- /dev/null
+++ b/xen/include/xen/unaligned.h
@@ -0,0 +1,79 @@
+/*
+ * This header can be used by architectures where unaligned accesses work
+ * without faulting, and at least reasonably efficiently.  Other architectures
+ * will need to have a custom asm/unaligned.h.
+ */
+#ifndef __ASM_UNALIGNED_H__
+#error "xen/unaligned.h should not be included directly - include asm/unaligned.h instead"
+#endif
+
+#ifndef __XEN_UNALIGNED_H__
+#define __XEN_UNALIGNED_H__
+
+#include <xen/types.h>
+#include <asm/byteorder.h>
+
+#define get_unaligned(p) (*(p))
+#define put_unaligned(val, p) (*(p) = (val))
+
+static inline uint16_t get_unaligned_be16(const void *p)
+{
+       return be16_to_cpup(p);
+}
+
+static inline void put_unaligned_be16(uint16_t val, void *p)
+{
+       *(__force __be16*)p = cpu_to_be16(val);
+}
+
+static inline uint32_t get_unaligned_be32(const void *p)
+{
+       return be32_to_cpup(p);
+}
+
+static inline void put_unaligned_be32(uint32_t val, void *p)
+{
+       *(__force __be32*)p = cpu_to_be32(val);
+}
+
+static inline uint64_t get_unaligned_be64(const void *p)
+{
+       return be64_to_cpup(p);
+}
+
+static inline void put_unaligned_be64(uint64_t val, void *p)
+{
+       *(__force __be64*)p = cpu_to_be64(val);
+}
+
+static inline uint16_t get_unaligned_le16(const void *p)
+{
+       return le16_to_cpup(p);
+}
+
+static inline void put_unaligned_le16(uint16_t val, void *p)
+{
+       *(__force __le16*)p = cpu_to_le16(val);
+}
+
+static inline uint32_t get_unaligned_le32(const void *p)
+{
+       return le32_to_cpup(p);
+}
+
+static inline void put_unaligned_le32(uint32_t val, void *p)
+{
+       *(__force __le32*)p = cpu_to_le32(val);
+}
+
+static inline uint64_t get_unaligned_le64(const void *p)
+{
+       return le64_to_cpup(p);
+}
+
+static inline void put_unaligned_le64(uint64_t val, void *p)
+{
+       *(__force __le64*)p = cpu_to_le64(val);
+}
+
+#endif /* __XEN_UNALIGNED_H__ */
--
generated by git-patchbot for /home/xen/git/xen.git#staging



 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.