[PATCH 1/2] xen: make include/xen/unaligned.h usable on all architectures

Instead of defining get_unaligned() and put_unaligned() in a way that
only supports architectures allowing unaligned accesses, use the same
approach as the Linux kernel and let the compiler decide how to
generate the code for potentially unaligned data accesses, by reading
and writing the data through a pointer to an explicitly packed struct.
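
A minimal usage sketch (illustration only, not part of this patch; the
parse_length() helper and the buffer layout are hypothetical):

    #include <xen/types.h>
    #include <xen/unaligned.h>

    /* Read a 32-bit little-endian length field starting at byte offset 6
     * of a buffer that carries no alignment guarantee. */
    static uint32_t parse_length(const uint8_t *buf)
    {
        return get_unaligned_le32(buf + 6);
    }

The access expands through the packed-struct helpers added by this
patch, so the compiler typically emits a single load on architectures
that handle unaligned accesses in hardware, and byte-wise code
elsewhere.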

Update include/xen/unaligned.h from include/asm-generic/unaligned.h of
the Linux kernel.

The generated code has been checked to be the same on x86 before and
after this change.

Signed-off-by: Arnd Bergmann <arnd@xxxxxxxx>
Origin: git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git 803f4e1eab7a
Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
---
 xen/include/xen/unaligned.h | 121 +++++++++++++++++++++++++-----------
 1 file changed, 86 insertions(+), 35 deletions(-)

diff --git a/xen/include/xen/unaligned.h b/xen/include/xen/unaligned.h
index 0a2b16d05d..325d9f875f 100644
--- a/xen/include/xen/unaligned.h
+++ b/xen/include/xen/unaligned.h
@@ -1,12 +1,4 @@
-/*
- * This header can be used by architectures where unaligned accesses work
- * without faulting, and at least reasonably efficiently.  Other architectures
- * will need to have a custom asm/unaligned.h.
- */
-#ifndef __ASM_UNALIGNED_H__
-#error "xen/unaligned.h should not be included directly - include asm/unaligned.h instead"
-#endif
-
+/* SPDX-License-Identifier: GPL-2.0 */
 #ifndef __XEN_UNALIGNED_H__
 #define __XEN_UNALIGNED_H__
 
@@ -15,67 +7,126 @@
 #include <asm/byteorder.h>
 #endif
 
-#define get_unaligned(p) (*(p))
-#define put_unaligned(val, p) (*(p) = (val))
+/*
+ * This is the most generic implementation of unaligned accesses
+ * and should work almost anywhere.
+ */
+
+#define __get_unaligned_t(type, ptr) ({                                   \
+       const struct { type x; } __packed *__pptr = (typeof(__pptr))(ptr); \
+       __pptr->x;                                                         \
+})
+
+#define __put_unaligned_t(type, val, ptr) do {                            \
+       struct { type x; } __packed *__pptr = (typeof(__pptr))(ptr);       \
+       __pptr->x = (val);                                                 \
+} while (0)
+
+#define get_unaligned(ptr)     __get_unaligned_t(typeof(*(ptr)), (ptr))
+#define put_unaligned(val, ptr) __put_unaligned_t(typeof(*(ptr)), (val), (ptr))
+
+static inline u16 get_unaligned_le16(const void *p)
+{
+       return le16_to_cpu(__get_unaligned_t(__le16, p));
+}
+
+static inline u32 get_unaligned_le32(const void *p)
+{
+       return le32_to_cpu(__get_unaligned_t(__le32, p));
+}
+
+static inline u64 get_unaligned_le64(const void *p)
+{
+       return le64_to_cpu(__get_unaligned_t(__le64, p));
+}
+
+static inline void put_unaligned_le16(u16 val, void *p)
+{
+       __put_unaligned_t(__le16, cpu_to_le16(val), p);
+}
+
+static inline void put_unaligned_le32(u32 val, void *p)
+{
+       __put_unaligned_t(__le32, cpu_to_le32(val), p);
+}
+
+static inline void put_unaligned_le64(u64 val, void *p)
+{
+       __put_unaligned_t(__le64, cpu_to_le64(val), p);
+}
+
+static inline u16 get_unaligned_be16(const void *p)
+{
+       return be16_to_cpu(__get_unaligned_t(__be16, p));
+}
+
+static inline u32 get_unaligned_be32(const void *p)
+{
+       return be32_to_cpu(__get_unaligned_t(__be32, p));
+}
 
-static inline uint16_t get_unaligned_be16(const void *p)
+static inline u64 get_unaligned_be64(const void *p)
 {
-       return be16_to_cpup(p);
+       return be64_to_cpu(__get_unaligned_t(__be64, p));
 }
 
-static inline void put_unaligned_be16(uint16_t val, void *p)
+static inline void put_unaligned_be16(u16 val, void *p)
 {
-       *(__force __be16*)p = cpu_to_be16(val);
+       __put_unaligned_t(__be16, cpu_to_be16(val), p);
 }
 
-static inline uint32_t get_unaligned_be32(const void *p)
+static inline void put_unaligned_be32(u32 val, void *p)
 {
-       return be32_to_cpup(p);
+       __put_unaligned_t(__be32, cpu_to_be32(val), p);
 }
 
-static inline void put_unaligned_be32(uint32_t val, void *p)
+static inline void put_unaligned_be64(u64 val, void *p)
 {
-       *(__force __be32*)p = cpu_to_be32(val);
+       __put_unaligned_t(__be64, cpu_to_be64(val), p);
 }
 
-static inline uint64_t get_unaligned_be64(const void *p)
+static inline u32 __get_unaligned_be24(const u8 *p)
 {
-       return be64_to_cpup(p);
+       return p[0] << 16 | p[1] << 8 | p[2];
 }
 
-static inline void put_unaligned_be64(uint64_t val, void *p)
+static inline u32 get_unaligned_be24(const void *p)
 {
-       *(__force __be64*)p = cpu_to_be64(val);
+       return __get_unaligned_be24(p);
 }
 
-static inline uint16_t get_unaligned_le16(const void *p)
+static inline u32 __get_unaligned_le24(const u8 *p)
 {
-       return le16_to_cpup(p);
+       return p[0] | p[1] << 8 | p[2] << 16;
 }
 
-static inline void put_unaligned_le16(uint16_t val, void *p)
+static inline u32 get_unaligned_le24(const void *p)
 {
-       *(__force __le16*)p = cpu_to_le16(val);
+       return __get_unaligned_le24(p);
 }
 
-static inline uint32_t get_unaligned_le32(const void *p)
+static inline void __put_unaligned_be24(const u32 val, u8 *p)
 {
-       return le32_to_cpup(p);
+       *p++ = val >> 16;
+       *p++ = val >> 8;
+       *p++ = val;
 }
 
-static inline void put_unaligned_le32(uint32_t val, void *p)
+static inline void put_unaligned_be24(const u32 val, void *p)
 {
-       *(__force __le32*)p = cpu_to_le32(val);
+       __put_unaligned_be24(val, p);
 }
 
-static inline uint64_t get_unaligned_le64(const void *p)
+static inline void __put_unaligned_le24(const u32 val, u8 *p)
 {
-       return le64_to_cpup(p);
+       *p++ = val;
+       *p++ = val >> 8;
+       *p++ = val >> 16;
 }
 
-static inline void put_unaligned_le64(uint64_t val, void *p)
+static inline void put_unaligned_le24(const u32 val, void *p)
 {
-       *(__force __le64*)p = cpu_to_le64(val);
+       __put_unaligned_le24(val, p);
 }
 
 #endif /* __XEN_UNALIGNED_H__ */
-- 
2.35.3