|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [XEN][PATCH v4] xen/x86: guest_access: optimize raw_x_guest() for PV and HVM combinations
From: Grygorii Strashko <grygorii_strashko@xxxxxxxx>
Xen uses below pattern for raw_x_guest() functions:
#define raw_copy_to_guest(dst, src, len) \
(is_hvm_vcpu(current) ? \
copy_to_user_hvm((dst), (src), (len)) : \
copy_to_guest_pv(dst, src, len))
This pattern works depending on CONFIG_PV/CONFIG_HVM as:
- PV=y and HVM=y
Proper guest access function is selected depending on domain type.
- PV=y and HVM=n
Only PV domains are possible. is_hvm_domain/vcpu() will be constified to "false",
and the compiler will optimize the code by dropping the HVM-specific part.
- PV=n and HVM=y
Only HVM domains are possible. is_hvm_domain/vcpu() will not be constified,
so no PV-specific code will be optimized away by the compiler.
- PV=n and HVM=n
No guests should be possible. The code will still follow the PV path.
Rework raw_x_guest() code to use static inline functions which account for
above PV/HVM possible configurations with main intention to optimize code
for (PV=n and HVM=y) case.
For the (PV=n and HVM=n) case, return the "len" value to indicate a failure (no
guests should be possible in this case, which means no access to guest
memory should ever happen).
Finally move arch/x86/usercopy.c into arch/x86/pv/usercopy.c to use it only
with PV=y.
The measured (bloat-o-meter) improvement for (PV=n and HVM=y) case is:
add/remove: 3/8 grow/shrink: 3/89 up/down: 1018/-12087 (-11069)
Total: Before=1937280, After=1926211, chg -0.57%
[teddy.astie@xxxxxxxxxx: Suggested to use static inline functions vs
macro combinations]
Suggested-by: Teddy Astie <teddy.astie@xxxxxxxxxx>
Signed-off-by: Grygorii Strashko <grygorii_strashko@xxxxxxxx>
Reviewed-by: Jason Andryuk <jason.andryuk@xxxxxxx>
---
changes in v4:
- move usercopy.c into arch/x86/pv/
- rework to always dynamically check for HVM vcpu(domain) by using is_hvm_vcpu()
as requested by Jan Beulich
changes in v3:
- add raw_use_hvm_access() wrapper
changes in v2:
- use static inline functions instead of macro combinations
xen/arch/x86/Makefile | 4 --
xen/arch/x86/include/asm/guest_access.h | 78 ++++++++++++++++++-------
xen/arch/x86/pv/Makefile | 4 ++
xen/arch/x86/{ => pv}/usercopy.c | 0
4 files changed, 62 insertions(+), 24 deletions(-)
rename xen/arch/x86/{ => pv}/usercopy.c (100%)
diff --git a/xen/arch/x86/Makefile b/xen/arch/x86/Makefile
index 407571c510e1..6e2b17471719 100644
--- a/xen/arch/x86/Makefile
+++ b/xen/arch/x86/Makefile
@@ -71,7 +71,6 @@ obj-y += time.o
obj-y += traps-setup.o
obj-y += traps.o
obj-$(CONFIG_INTEL) += tsx.o
-obj-y += usercopy.o
obj-y += x86_emulate.o
obj-$(CONFIG_TBOOT) += tboot.o
obj-y += hpet.o
@@ -92,9 +91,6 @@ hostprogs-y += efi/mkreloc
$(obj)/efi/mkreloc: HOSTCFLAGS += -I$(srctree)/include
-# Allows usercopy.c to include itself
-$(obj)/usercopy.o: CFLAGS-y += -iquote .
-
ifneq ($(CONFIG_HVM),y)
$(obj)/x86_emulate.o: CFLAGS-y += -Wno-unused-label
endif
diff --git a/xen/arch/x86/include/asm/guest_access.h
b/xen/arch/x86/include/asm/guest_access.h
index 69716c8b41bb..f0e56b112e14 100644
--- a/xen/arch/x86/include/asm/guest_access.h
+++ b/xen/arch/x86/include/asm/guest_access.h
@@ -13,26 +13,64 @@
#include <asm/hvm/guest_access.h>
/* Raw access functions: no type checking. */
-#define raw_copy_to_guest(dst, src, len) \
- (is_hvm_vcpu(current) ? \
- copy_to_user_hvm((dst), (src), (len)) : \
- copy_to_guest_pv(dst, src, len))
-#define raw_copy_from_guest(dst, src, len) \
- (is_hvm_vcpu(current) ? \
- copy_from_user_hvm((dst), (src), (len)) : \
- copy_from_guest_pv(dst, src, len))
-#define raw_clear_guest(dst, len) \
- (is_hvm_vcpu(current) ? \
- clear_user_hvm((dst), (len)) : \
- clear_guest_pv(dst, len))
-#define __raw_copy_to_guest(dst, src, len) \
- (is_hvm_vcpu(current) ? \
- copy_to_user_hvm((dst), (src), (len)) : \
- __copy_to_guest_pv(dst, src, len))
-#define __raw_copy_from_guest(dst, src, len) \
- (is_hvm_vcpu(current) ? \
- copy_from_user_hvm((dst), (src), (len)) : \
- __copy_from_guest_pv(dst, src, len))
+static inline unsigned int raw_copy_to_guest(void *dst, const void *src,
+ unsigned int len)
+{
+ if ( is_hvm_vcpu(current) )
+ return copy_to_user_hvm(dst, src, len);
+
+ if ( !IS_ENABLED(CONFIG_PV) )
+ return len;
+
+ return copy_to_guest_pv(dst, src, len);
+}
+
+static inline unsigned int raw_copy_from_guest(void *dst, const void *src,
+ unsigned int len)
+{
+ if ( is_hvm_vcpu(current) )
+ return copy_from_user_hvm(dst, src, len);
+
+ if ( !IS_ENABLED(CONFIG_PV) )
+ return len;
+
+ return copy_from_guest_pv(dst, src, len);
+}
+
+static inline unsigned int raw_clear_guest(void *dst, unsigned int len)
+{
+ if ( is_hvm_vcpu(current) )
+ return clear_user_hvm(dst, len);
+
+ if ( !IS_ENABLED(CONFIG_PV) )
+ return len;
+
+ return clear_guest_pv(dst, len);
+}
+
+static inline unsigned int __raw_copy_to_guest(void *dst, const void *src,
+ unsigned int len)
+{
+ if ( is_hvm_vcpu(current) )
+ return copy_to_user_hvm(dst, src, len);
+
+ if ( !IS_ENABLED(CONFIG_PV) )
+ return len;
+
+ return __copy_to_guest_pv(dst, src, len);
+}
+
+static inline unsigned int __raw_copy_from_guest(void *dst, const void *src,
+ unsigned int len)
+{
+ if ( is_hvm_vcpu(current) )
+ return copy_from_user_hvm(dst, src, len);
+
+ if ( !IS_ENABLED(CONFIG_PV) )
+ return len;
+
+ return __copy_from_guest_pv(dst, src, len);
+}
/*
* Pre-validate a guest handle.
diff --git a/xen/arch/x86/pv/Makefile b/xen/arch/x86/pv/Makefile
index 6cda354cc41f..59489cd75af6 100644
--- a/xen/arch/x86/pv/Makefile
+++ b/xen/arch/x86/pv/Makefile
@@ -14,6 +14,10 @@ obj-y += ro-page-fault.o
obj-$(CONFIG_PV_SHIM) += shim.o
obj-$(CONFIG_TRACEBUFFER) += trace.o
obj-y += traps.o
+obj-$(CONFIG_PV) += usercopy.o
obj-bin-y += dom0_build.init.o
obj-bin-y += gpr_switch.o
+
+# Allows usercopy.c to include itself
+$(obj)/usercopy.o: CFLAGS-y += -iquote .
diff --git a/xen/arch/x86/usercopy.c b/xen/arch/x86/pv/usercopy.c
similarity index 100%
rename from xen/arch/x86/usercopy.c
rename to xen/arch/x86/pv/usercopy.c
--
2.34.1
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |