Re: [PATCH v2 04/26] xen: consolidate CONFIG_VM_EVENT
On 10.09.2025 09:38, Penny Zheng wrote:
> @@ -2456,9 +2460,13 @@ static struct hvm_function_table __initdata_cf_clobber svm_function_table = {
> .fpu_dirty_intercept = svm_fpu_dirty_intercept,
> .msr_read_intercept = svm_msr_read_intercept,
> .msr_write_intercept = svm_msr_write_intercept,
> +#ifdef CONFIG_VM_EVENT
> .enable_msr_interception = svm_enable_msr_interception,
> +#endif
> .set_rdtsc_exiting = svm_set_rdtsc_exiting,
> +#ifdef CONFIG_VM_EVENT
> .set_descriptor_access_exiting = svm_set_descriptor_access_exiting,
> +#endif
I think in such a case it would be preferable to move one of the existing
lines, so we can get away with just a single #ifdef.
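I.e. something along these lines (sketch; designated initializers aren't
order-sensitive, so .set_rdtsc_exiting can simply move up):

    .msr_write_intercept = svm_msr_write_intercept,
    .set_rdtsc_exiting = svm_set_rdtsc_exiting,
#ifdef CONFIG_VM_EVENT
    .enable_msr_interception = svm_enable_msr_interception,
    .set_descriptor_access_exiting = svm_set_descriptor_access_exiting,
#endif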
> --- a/xen/arch/x86/include/asm/hvm/hvm.h
> +++ b/xen/arch/x86/include/asm/hvm/hvm.h
> @@ -192,7 +192,9 @@ struct hvm_function_table {
> void (*handle_cd)(struct vcpu *v, unsigned long value);
> void (*set_info_guest)(struct vcpu *v);
> void (*set_rdtsc_exiting)(struct vcpu *v, bool enable);
> +#ifdef CONFIG_VM_EVENT
> void (*set_descriptor_access_exiting)(struct vcpu *v, bool enable);
> +#endif
>
> /* Nested HVM */
> int (*nhvm_vcpu_initialise)(struct vcpu *v);
> @@ -224,7 +226,9 @@ struct hvm_function_table {
> paddr_t *L1_gpa, unsigned int *page_order,
> uint8_t *p2m_acc, struct npfec npfec);
>
> +#ifdef CONFIG_VM_EVENT
> void (*enable_msr_interception)(struct domain *d, uint32_t msr);
> +#endif
Possibly same here.
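I.e. group the two hooks under a single #ifdef, e.g. (sketch; the struct is
hypervisor-internal, so member order is free to change):

    void (*set_rdtsc_exiting)(struct vcpu *v, bool enable);
#ifdef CONFIG_VM_EVENT
    void (*set_descriptor_access_exiting)(struct vcpu *v, bool enable);
    void (*enable_msr_interception)(struct domain *d, uint32_t msr);
#endif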
> @@ -435,7 +439,11 @@ static inline bool using_svm(void)
>
> static inline bool hvm_has_set_descriptor_access_exiting(void)
> {
> +#ifdef CONFIG_VM_EVENT
> return hvm_funcs.set_descriptor_access_exiting;
> +#else
> + return false;
> +#endif
> }
This is actively wrong. Since it's only monitor.[ch] which use the function,
I don't see why it can't just be wrapped in an #ifdef. With what you do,
some new caller might appear to function fine, until run in a VM_EVENT=n
build.
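I.e. simply (sketch):

#ifdef CONFIG_VM_EVENT
static inline bool hvm_has_set_descriptor_access_exiting(void)
{
    return hvm_funcs.set_descriptor_access_exiting;
}
#endif

That way a new use site would fail to build in a VM_EVENT=n configuration,
instead of silently seeing "false".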
> @@ -681,7 +689,9 @@ static inline int nhvm_hap_walk_L1_p2m(
>
> static inline void hvm_enable_msr_interception(struct domain *d, uint32_t msr)
> {
> +#ifdef CONFIG_VM_EVENT
> alternative_vcall(hvm_funcs.enable_msr_interception, d, msr);
> +#endif
> }
Mostly the same here.
> --- a/xen/arch/x86/include/asm/hvm/monitor.h
> +++ b/xen/arch/x86/include/asm/hvm/monitor.h
> @@ -17,14 +17,16 @@ enum hvm_monitor_debug_type
> HVM_MONITOR_DEBUG_EXCEPTION,
> };
>
> +#define hvm_monitor_crX(cr, new, old) \
> + hvm_monitor_cr(VM_EVENT_X86_##cr, new, old)
> +
> +#ifdef CONFIG_VM_EVENT
> /*
> * Called for current VCPU on crX/MSR changes by guest. Bool return signals
> * whether emulation should be postponed.
> */
> bool hvm_monitor_cr(unsigned int index, unsigned long value,
> unsigned long old);
> -#define hvm_monitor_crX(cr, new, old) \
> - hvm_monitor_cr(VM_EVENT_X86_##cr, new, old)
> bool hvm_monitor_msr(unsigned int msr, uint64_t new_value, uint64_t old_value);
> void hvm_monitor_descriptor_access(uint64_t exit_info,
> uint64_t vmx_exit_qualification,
> @@ -45,6 +47,65 @@ int hvm_monitor_vmexit(unsigned long exit_reason,
>
> int hvm_monitor_io(unsigned int port, unsigned int bytes,
> bool in, bool str);
> +#else
> +static inline bool hvm_monitor_cr(unsigned int index, unsigned long value,
> + unsigned long old)
> +{
> + return false;
> +}
> +
> +static inline bool hvm_monitor_msr(unsigned int msr, uint64_t new_value,
> + uint64_t old_value)
> +{
> + return false;
> +}
> +
> +static inline void hvm_monitor_descriptor_access(uint64_t exit_info,
> + uint64_t vmx_exit_qualification,
> + uint8_t descriptor, bool is_write) {}
> +
> +static inline int hvm_monitor_debug(unsigned long rip,
> + enum hvm_monitor_debug_type type,
> + unsigned int trap_type,
> + unsigned int insn_length,
> + unsigned int pending_dbg)
> +{
> + return -EOPNOTSUPP;
> +}
> +
> +static inline int hvm_monitor_cpuid(unsigned long insn_length,
> + unsigned int leaf, unsigned int subleaf)
> +{
> + return -EOPNOTSUPP;
> +}
> +
> +static inline void hvm_monitor_interrupt(unsigned int vector,
> + unsigned int type,
> + unsigned int err, uint64_t cr2) {}
> +
> +static inline bool hvm_monitor_emul_unimplemented(void)
> +{
> + return false;
> +}
> +
> +static inline bool hvm_monitor_check_p2m(unsigned long gla, gfn_t gfn,
> + uint32_t pfec, uint16_t kind)
> +{
> + return false;
> +}
> +
> +static inline int hvm_monitor_vmexit(unsigned long exit_reason,
> + unsigned long exit_qualification)
> +{
> + return -EOPNOTSUPP;
> +}
> +
> +static inline int hvm_monitor_io(unsigned int port, unsigned int bytes,
> + bool in, bool str)
> +{
> + return -EOPNOTSUPP;
> +}
For this one it's perhaps easiest to see that -EOPNOTSUPP (or in fact any
negative value) is wrong to return from the stub: just look at both use
sites. Guests wouldn't be able to use I/O insns anymore for intercepted
ports. Others look to have similar issues, while the ones returning "false"
look okay.
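Presumably the stubs want to mirror the "no event raised, carry on normally"
case, i.e. (untested):

static inline int hvm_monitor_io(unsigned int port, unsigned int bytes,
                                 bool in, bool str)
{
    return 0; /* no event raised; don't make the insn fail */
}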
> --- a/xen/include/xen/mem_access.h
> +++ b/xen/include/xen/mem_access.h
> @@ -33,9 +33,7 @@
> */
> struct vm_event_st;
>
> -#ifdef CONFIG_VM_EVENT
> #include <asm/mem_access.h>
> -#endif
Aiui this breaks the build on PPC and RISC-V, which don't have such a
header. If this change is really needed (which I'm not convinced of, as
x86's hvm/hvm.c could as well include asm/mem_access.h directly), you'll
need to use has_include() here.
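I.e. something like:

#if __has_include(<asm/mem_access.h>)
# include <asm/mem_access.h>
#endif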
> @@ -74,6 +72,7 @@ typedef enum {
> } p2m_access_t;
>
> struct p2m_domain;
> +#ifdef CONFIG_VM_EVENT
> bool xenmem_access_to_p2m_access(const struct p2m_domain *p2m,
> xenmem_access_t xaccess,
> p2m_access_t *paccess);
> @@ -99,10 +98,40 @@ long p2m_set_mem_access_multi(struct domain *d,
> int p2m_get_mem_access(struct domain *d, gfn_t gfn, xenmem_access_t *access,
> unsigned int altp2m_idx);
>
> -#ifdef CONFIG_VM_EVENT
> int mem_access_memop(unsigned long cmd,
> XEN_GUEST_HANDLE_PARAM(xen_mem_access_op_t) arg);
> #else
> +static inline bool xenmem_access_to_p2m_access(const struct p2m_domain *p2m,
> + xenmem_access_t xaccess,
> + p2m_access_t *paccess)
> +{
> + return false;
> +}
So this is needed when VM_EVENT=n and ALTP2M=y. Tamas, is this a configuration
which makes sense?
> +static inline long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr,
> + uint32_t start, uint32_t mask,
> + xenmem_access_t access,
> + unsigned int altp2m_idx)
> +{
> + return -EOPNOTSUPP;
> +}
> +
> +static inline long p2m_set_mem_access_multi(struct domain *d,
> + const XEN_GUEST_HANDLE(const_uint64) pfn_list,
> + const XEN_GUEST_HANDLE(const_uint8) access_list,
> + uint32_t nr, uint32_t start, uint32_t mask,
> + unsigned int altp2m_idx)
> +{
> + return -EOPNOTSUPP;
> +}
> +
> +static inline int p2m_get_mem_access(struct domain *d, gfn_t gfn,
> + xenmem_access_t *access,
> + unsigned int altp2m_idx)
> +{
> + return -EOPNOTSUPP;
> +}
Instead of these, I wonder whether a single #ifdef in do_altp2m_op()
wouldn't be more appropriate (assuming the above config makes some sense
in the first place). Actually, it would need to be two #ifdef-s, one in
each of the two switch() blocks.
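Roughly (sketch, taking HVMOP_altp2m_set_mem_access as an example; the
compiled-out case labels would then fall to the default: handling and hence
yield -EOPNOTSUPP):

    switch ( a.cmd )
    {
    ...
#ifdef CONFIG_VM_EVENT
    case HVMOP_altp2m_set_mem_access:
        /* existing handling */
        break;
#endif
    default:
        rc = -EOPNOTSUPP;
        break;
    }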
> --- a/xen/include/xen/monitor.h
> +++ b/xen/include/xen/monitor.h
> @@ -30,6 +30,7 @@ struct xen_domctl_monitor_op;
> #ifdef CONFIG_VM_EVENT
> int monitor_domctl(struct domain *d, struct xen_domctl_monitor_op *mop);
> void monitor_guest_request(void);
> +int monitor_traps(struct vcpu *v, bool sync, vm_event_request_t *req);
> #else /* !CONFIG_VM_EVENT */
> static inline int monitor_domctl(struct domain *d,
> struct xen_domctl_monitor_op *mop)
> @@ -37,8 +38,11 @@ static inline int monitor_domctl(struct domain *d,
> return -EOPNOTSUPP;
> }
> static inline void monitor_guest_request(void) {}
> +static inline int monitor_traps(struct vcpu *v, bool sync,
> + vm_event_request_t *req)
> +{
> + return -EOPNOTSUPP;
> +}
Is this needed? There's only one call that needs taking care of afaics, in
hvm_hap_nested_page_fault(). That call is gated on "req_ptr" being non-NULL
though, which isn't possible when p2m_mem_access_check() is also a stub.
Hence the compiler ought to be able to DCE the call.
> --- a/xen/include/xen/vm_event.h
> +++ b/xen/include/xen/vm_event.h
> @@ -50,6 +50,7 @@ struct vm_event_domain
> unsigned int last_vcpu_wake_up;
> };
>
> +#ifdef CONFIG_VM_EVENT
> /* Returns whether a ring has been set up */
> bool vm_event_check_ring(struct vm_event_domain *ved);
>
> @@ -68,6 +69,20 @@ bool vm_event_check_ring(struct vm_event_domain *ved);
> */
> int __vm_event_claim_slot(struct domain *d, struct vm_event_domain *ved,
> bool allow_sleep);
> +#else
> +static inline bool vm_event_check_ring(struct vm_event_domain *ved)
> +{
> + return false;
> +}
Which call site is in need of this stub? I was first considering
mem_paging_enabled(), but MEM_PAGING already depends on VM_EVENT.
> +static inline int __vm_event_claim_slot(struct domain *d,
> + struct vm_event_domain *ved,
> + bool allow_sleep)
> +{
> + return -EOPNOTSUPP;
> +}
Sadly this looks to be needed when MEM_SHARING=y and VM_EVENT=n.
> @@ -82,23 +97,28 @@ static inline int vm_event_claim_slot_nosleep(struct domain *d,
>
> void vm_event_cancel_slot(struct domain *d, struct vm_event_domain *ved);
>
> +#ifdef CONFIG_VM_EVENT
> void vm_event_put_request(struct domain *d, struct vm_event_domain *ved,
> vm_event_request_t *req);
>
> -#ifdef CONFIG_VM_EVENT
> /* Clean up on domain destruction */
> void vm_event_cleanup(struct domain *d);
> int vm_event_domctl(struct domain *d, struct xen_domctl_vm_event_op *vec);
> +
> +void vm_event_vcpu_pause(struct vcpu *v);
> #else /* !CONFIG_VM_EVENT */
> +static inline void vm_event_put_request(struct domain *d,
> + struct vm_event_domain *ved,
> + vm_event_request_t *req) {}
Same here and ...
> static inline void vm_event_cleanup(struct domain *d) {}
> static inline int vm_event_domctl(struct domain *d,
> struct xen_domctl_vm_event_op *vec)
> {
> return -EOPNOTSUPP;
> }
> +static inline void vm_event_vcpu_pause(struct vcpu *v) {};
... here.
> #endif /* !CONFIG_VM_EVENT */
>
> -void vm_event_vcpu_pause(struct vcpu *v);
> void vm_event_vcpu_unpause(struct vcpu *v);
Please move vm_event_vcpu_unpause() as well (without adding a stub). The
two are better kept together.
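I.e. end up with (other declarations omitted):

#ifdef CONFIG_VM_EVENT
void vm_event_vcpu_pause(struct vcpu *v);
void vm_event_vcpu_unpause(struct vcpu *v);
#else /* !CONFIG_VM_EVENT */
static inline void vm_event_vcpu_pause(struct vcpu *v) {}
#endif /* !CONFIG_VM_EVENT */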
Jan
|