[PATCH 1/3] x86/mem_sharing: make fork_reset more configurable

Allow specifying distinct parts of the fork VM to be reset. This is
useful when a fuzzing operation involves mapping in only a handful of
pages that are known ahead of time. Throwing these pages away just to
be re-copied immediately is expensive, so allowing partial resets to
be specified can speed things up. Also allow resetting to be initiated
from vm_event responses as an optimization.

Signed-off-by: Tamas K Lengyel <tamas.lengyel@xxxxxxxxx>
Reviewed-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
---
v5: split toolstack part to separate patch and use ASSERT_UNREACHABLE
v4: No change
v3: Rebase on simpler approach after dropping empty_p2m feature
v2: address review comments and add more sanity checking
---
 xen/arch/x86/include/asm/mem_sharing.h |  9 +++++++++
 xen/arch/x86/mm/mem_sharing.c          | 24 +++++++++++++++++++-----
 xen/common/vm_event.c                  | 18 ++++++++++++++++++
 xen/include/public/memory.h            |  4 +++-
 xen/include/public/vm_event.h          |  8 ++++++++
 5 files changed, 57 insertions(+), 6 deletions(-)

diff --git a/xen/arch/x86/include/asm/mem_sharing.h b/xen/arch/x86/include/asm/mem_sharing.h
index cf7a12f4d2..2c00069bc9 100644
--- a/xen/arch/x86/include/asm/mem_sharing.h
+++ b/xen/arch/x86/include/asm/mem_sharing.h
@@ -85,6 +85,9 @@ static inline bool mem_sharing_is_fork(const struct domain *d)
 
 int mem_sharing_fork_page(struct domain *d, gfn_t gfn, bool unsharing);
 
+int mem_sharing_fork_reset(struct domain *d, bool reset_state,
+                           bool reset_memory);
+
 /*
  * If called by a foreign domain, possible errors are
  *   -EBUSY -> ring full
@@ -148,6 +151,12 @@ static inline int mem_sharing_fork_page(struct domain *d, gfn_t gfn, bool lock)
     return -EOPNOTSUPP;
 }
 
+static inline int mem_sharing_fork_reset(struct domain *d, bool reset_state,
+                                         bool reset_memory)
+{
+    return -EOPNOTSUPP;
+}
+
 #endif
 
 #endif /* __MEM_SHARING_H__ */
diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c
index a5c16b4429..1e1fb27c1a 100644
--- a/xen/arch/x86/mm/mem_sharing.c
+++ b/xen/arch/x86/mm/mem_sharing.c
@@ -1881,15 +1881,21 @@ static int fork(struct domain *cd, struct domain *d)
  * footprints the hypercall continuation should be implemented (or if this
  * feature needs to be become "stable").
  */
-static int mem_sharing_fork_reset(struct domain *d)
+int mem_sharing_fork_reset(struct domain *d, bool reset_state,
+                           bool reset_memory)
 {
-    int rc;
+    int rc = 0;
     struct domain *pd = d->parent;
     struct p2m_domain *p2m = p2m_get_hostp2m(d);
     struct page_info *page, *tmp;
 
+    ASSERT(reset_state || reset_memory);
+
     domain_pause(d);
 
+    if ( !reset_memory )
+        goto state;
+
     /* need recursive lock because we will free pages */
     spin_lock_recursive(&d->page_alloc_lock);
     page_list_for_each_safe(page, tmp, &d->page_list)
@@ -1922,7 +1928,9 @@ static int mem_sharing_fork_reset(struct domain *d)
     }
     spin_unlock_recursive(&d->page_alloc_lock);
 
-    rc = copy_settings(d, pd);
+ state:
+    if ( reset_state )
+        rc = copy_settings(d, pd);
 
     domain_unpause(d);
 
@@ -2229,15 +2237,21 @@ int mem_sharing_memop(XEN_GUEST_HANDLE_PARAM(xen_mem_sharing_op_t) arg)
 
     case XENMEM_sharing_op_fork_reset:
     {
+        bool reset_state = mso.u.fork.flags & XENMEM_FORK_RESET_STATE;
+        bool reset_memory = mso.u.fork.flags & XENMEM_FORK_RESET_MEMORY;
+
         rc = -EINVAL;
-        if ( mso.u.fork.pad || mso.u.fork.flags )
+        if ( mso.u.fork.pad || (!reset_state && !reset_memory) )
+            goto out;
+        if ( mso.u.fork.flags &
+             ~(XENMEM_FORK_RESET_STATE | XENMEM_FORK_RESET_MEMORY) )
             goto out;
 
         rc = -ENOSYS;
         if ( !d->parent )
             goto out;
 
-        rc = mem_sharing_fork_reset(d);
+        rc = mem_sharing_fork_reset(d, reset_state, reset_memory);
         break;
     }
 
diff --git a/xen/common/vm_event.c b/xen/common/vm_event.c
index 84cf52636b..cc7d8bf565 100644
--- a/xen/common/vm_event.c
+++ b/xen/common/vm_event.c
@@ -28,6 +28,11 @@
 #include <asm/p2m.h>
 #include <asm/monitor.h>
 #include <asm/vm_event.h>
+
+#ifdef CONFIG_MEM_SHARING
+#include <asm/mem_sharing.h>
+#endif
+
 #include <xsm/xsm.h>
 
 #include <public/hvm/params.h>
@@ -394,6 +399,19 @@ static int vm_event_resume(struct domain *d, struct vm_event_domain *ved)
         if ( rsp.reason == VM_EVENT_REASON_MEM_PAGING )
             p2m_mem_paging_resume(d, &rsp);
 #endif
+#ifdef CONFIG_MEM_SHARING
+        if ( mem_sharing_is_fork(d) )
+        {
+            bool reset_state = rsp.flags & VM_EVENT_FLAG_RESET_FORK_STATE;
+            bool reset_mem = rsp.flags & VM_EVENT_FLAG_RESET_FORK_MEMORY;
+
+            if ( (reset_state || reset_mem) &&
+                 mem_sharing_fork_reset(d, reset_state, reset_mem) )
+            {
+                ASSERT_UNREACHABLE();
+            }
+        }
+#endif
 
         /*
          * Check emulation flags in the arch-specific handler only, as it
diff --git a/xen/include/public/memory.h b/xen/include/public/memory.h
index a1a0f0233a..f8d26fb77d 100644
--- a/xen/include/public/memory.h
+++ b/xen/include/public/memory.h
@@ -541,12 +541,14 @@ struct xen_mem_sharing_op {
                 uint32_t gref;     /* IN: gref to debug         */
             } u;
         } debug;
-        struct mem_sharing_op_fork {      /* OP_FORK */
+        struct mem_sharing_op_fork {      /* OP_FORK{,_RESET} */
             domid_t parent_domain;        /* IN: parent's domain id */
 /* Only makes sense for short-lived forks */
 #define XENMEM_FORK_WITH_IOMMU_ALLOWED (1u << 0)
 /* Only makes sense for short-lived forks */
 #define XENMEM_FORK_BLOCK_INTERRUPTS   (1u << 1)
+#define XENMEM_FORK_RESET_STATE        (1u << 2)
+#define XENMEM_FORK_RESET_MEMORY       (1u << 3)
             uint16_t flags;               /* IN: optional settings */
             uint32_t pad;                 /* Must be set to 0 */
         } fork;
diff --git a/xen/include/public/vm_event.h b/xen/include/public/vm_event.h
index bb003d21d0..1673bb8703 100644
--- a/xen/include/public/vm_event.h
+++ b/xen/include/public/vm_event.h
@@ -127,6 +127,14 @@
  * Reset the vmtrace buffer (if vmtrace is enabled)
  */
 #define VM_EVENT_FLAG_RESET_VMTRACE      (1 << 13)
+/*
+ * Reset the VM state (if VM is fork)
+ */
+#define VM_EVENT_FLAG_RESET_FORK_STATE   (1 << 14)
+/*
+ * Remove unshared entries from physmap (if VM is fork)
+ */
+#define VM_EVENT_FLAG_RESET_FORK_MEMORY  (1 << 15)
 
 /*
  * Reasons for the vm event request
-- 
2.34.1
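
Illustrative usage (not part of the patch): a minimal sketch of how a
monitoring client could request a partial fork reset directly from its
vm_event response, rather than issuing a separate
XENMEM_sharing_op_fork_reset hypercall. The ring mapping and
request/response plumbing are assumed to exist in the client, and the
helper name below is hypothetical; only the VM_EVENT_FLAG_RESET_FORK_*
flags and their semantics come from this patch.

#include <string.h>
#include <xen/vm_event.h>

/*
 * Build a response that resumes the paused vCPU and, in the same round
 * trip, asks Xen to reset the fork's state while keeping its memory.
 */
static void build_fork_reset_response(const vm_event_request_t *req,
                                      vm_event_response_t *rsp)
{
    memset(rsp, 0, sizeof(*rsp));
    rsp->version = VM_EVENT_INTERFACE_VERSION;
    rsp->vcpu_id = req->vcpu_id;
    rsp->reason  = req->reason;

    /*
     * Reset state only: the handful of already-unshared pages stays in
     * place so it need not be re-copied from the parent. OR in
     * VM_EVENT_FLAG_RESET_FORK_MEMORY to also drop unshared entries
     * from the physmap.
     */
    rsp->flags = VM_EVENT_FLAG_VCPU_PAUSED | VM_EVENT_FLAG_RESET_FORK_STATE;
}

With the vm_event_resume() hunk above, the reset then runs in
hypervisor context before the vCPU is unpaused, which is the
optimization the commit message refers to.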