Re: [Xen-devel] [PATCH v4] common/vm_event: Initialize vm_event lists on domain creation
On Mon, Aug 28, 2017 at 4:54 AM, Alexandru Isaila
<aisaila@xxxxxxxxxxxxxxx> wrote:
> The patch splits vm_event into three structures: vm_event_share,
> vm_event_paging and vm_event_monitor. The allocation of these
> structures is moved to vm_event_enable so that they can be
> allocated/initialized when needed and freed in vm_event_disable.
>
> Signed-off-by: Alexandru Isaila <aisaila@xxxxxxxxxxxxxxx>
>
> ---
> Changes since V3:
> - Moved the d->vm_event_paging check to the if below in the
> assign_device function
>
> Note: not tested on arm; compiled on both arm and x86.
> ---
> xen/arch/arm/mem_access.c | 2 +-
> xen/arch/x86/mm/mem_access.c | 2 +-
> xen/arch/x86/mm/mem_paging.c | 2 +-
> xen/arch/x86/mm/mem_sharing.c | 4 +-
> xen/arch/x86/mm/p2m.c | 10 +--
> xen/common/domain.c | 13 ++--
> xen/common/mem_access.c | 2 +-
> xen/common/monitor.c | 4 +-
> xen/common/vm_event.c | 156 +++++++++++++++++++++++++-----------------
> xen/drivers/passthrough/pci.c | 2 +-
> xen/include/xen/sched.h | 18 ++---
> 11 files changed, 124 insertions(+), 91 deletions(-)
>
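For reference, the struct domain side of this (the sched.h hunk isn't
quoted here) is, as I read the patch, the single embedded vm_event
struct being replaced by three independently allocated pointers,
roughly:

    struct domain {
        /* ... other members ... */
        /* Sketch; the exact config guards are my assumption. */
    #ifdef CONFIG_HAS_MEM_PAGING
        /* Memory paging support. */
        struct vm_event_domain *vm_event_paging;
    #endif
        /* VM event monitor support. */
        struct vm_event_domain *vm_event_monitor;
    #ifdef CONFIG_HAS_MEM_SHARING
        /* Memory sharing support. */
        struct vm_event_domain *vm_event_share;
    #endif
    };
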
> diff --git a/xen/arch/arm/mem_access.c b/xen/arch/arm/mem_access.c
> index e0888bb..a7f0cae 100644
> --- a/xen/arch/arm/mem_access.c
> +++ b/xen/arch/arm/mem_access.c
> @@ -256,7 +256,7 @@ bool_t p2m_mem_access_check(paddr_t gpa, vaddr_t gla, const struct npfec npfec)
> }
>
> /* Otherwise, check if there is a vm_event monitor subscriber */
> - if ( !vm_event_check_ring(&v->domain->vm_event->monitor) )
> + if ( !vm_event_check_ring(v->domain->vm_event_monitor) )
> {
> /* No listener */
> if ( p2m->access_required )
> diff --git a/xen/arch/x86/mm/mem_access.c b/xen/arch/x86/mm/mem_access.c
> index 5adaf6d..414e38f 100644
> --- a/xen/arch/x86/mm/mem_access.c
> +++ b/xen/arch/x86/mm/mem_access.c
> @@ -179,7 +179,7 @@ bool_t p2m_mem_access_check(paddr_t gpa, unsigned long gla,
> gfn_unlock(p2m, gfn, 0);
>
> /* Otherwise, check if there is a memory event listener, and send the message along */
> - if ( !vm_event_check_ring(&d->vm_event->monitor) || !req_ptr )
> + if ( !vm_event_check_ring(d->vm_event_monitor) || !req_ptr )
> {
> /* No listener */
> if ( p2m->access_required )
> diff --git a/xen/arch/x86/mm/mem_paging.c b/xen/arch/x86/mm/mem_paging.c
> index a049e0d..20214ac 100644
> --- a/xen/arch/x86/mm/mem_paging.c
> +++ b/xen/arch/x86/mm/mem_paging.c
> @@ -43,7 +43,7 @@ int mem_paging_memop(XEN_GUEST_HANDLE_PARAM(xen_mem_paging_op_t) arg)
> goto out;
>
> rc = -ENODEV;
> - if ( unlikely(!d->vm_event->paging.ring_page) )
> + if ( !d->vm_event_paging || unlikely(!d->vm_event_paging->ring_page) )
So you are adding an extra NULL check here for d->vm_event_paging.
Perhaps that NULL check should be folded into vm_event_check_ring
itself, and then vm_event_check_ring used consistently at all of these
sites.
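Something like the following is what I have in mind -- just a sketch,
not even compile-tested:

    /* Sketch: let the ring check also cover the pointer itself, so
     * callers don't need a separate NULL test before calling it. */
    bool_t vm_event_check_ring(struct vm_event_domain *ved)
    {
        return ved && ved->ring_page;
    }

Callers could then just do
if ( !vm_event_check_ring(d->vm_event_paging) ) without open-coding
the pointer test at every site.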
> diff --git a/xen/common/mem_access.c b/xen/common/mem_access.c
> index 19f63bb..10ea4ae 100644
> --- a/xen/common/mem_access.c
> +++ b/xen/common/mem_access.c
> @@ -52,7 +52,7 @@ int mem_access_memop(unsigned long cmd,
> goto out;
>
> rc = -ENODEV;
> - if ( unlikely(!d->vm_event->monitor.ring_page) )
> + if ( !d->vm_event_monitor || unlikely(!d->vm_event_monitor->ring_page) )
... like here ^ ...
> @@ -187,41 +194,44 @@ void vm_event_wake(struct domain *d, struct vm_event_domain *ved)
> vm_event_wake_blocked(d, ved);
> }
>
> -static int vm_event_disable(struct domain *d, struct vm_event_domain *ved)
> +static int vm_event_disable(struct domain *d, struct vm_event_domain **ved)
> {
> - if ( ved->ring_page )
> + if ( *ved && (*ved)->ring_page )
... and here ^ ...
> @@ -267,6 +277,9 @@ void vm_event_put_request(struct domain *d,
> RING_IDX req_prod;
> struct vcpu *curr = current;
>
> + if( !ved )
... and here ^ ...
> + return;
> +
> if ( curr->domain != d )
> {
> req->flags |= VM_EVENT_FLAG_FOREIGN;
> @@ -434,6 +447,9 @@ void vm_event_resume(struct domain *d, struct vm_event_domain *ved)
>
> void vm_event_cancel_slot(struct domain *d, struct vm_event_domain *ved)
> {
> + if( !ved )
... and here ^ ...
> + return;
> +
> vm_event_ring_lock(ved);
> vm_event_release_slot(d, ved);
> vm_event_ring_unlock(ved);
> @@ -500,6 +516,9 @@ bool_t vm_event_check_ring(struct vm_event_domain *ved)
> int __vm_event_claim_slot(struct domain *d, struct vm_event_domain *ved,
> bool_t allow_sleep)
> {
> + if ( !ved )
... and here ^ ...
> + return -EOPNOTSUPP;
> +
> if ( (current->domain == d) && allow_sleep )
> return vm_event_wait_slot(ved);
> else
> @@ -510,24 +529,30 @@ int __vm_event_claim_slot(struct domain *d, struct vm_event_domain *ved,
> /* Registered with Xen-bound event channel for incoming notifications. */
> static void mem_paging_notification(struct vcpu *v, unsigned int port)
> {
> - if ( likely(v->domain->vm_event->paging.ring_page != NULL) )
> - vm_event_resume(v->domain, &v->domain->vm_event->paging);
> + struct domain *domain = v->domain;
> +
> + if ( likely(domain->vm_event_paging->ring_page != NULL) )
... and here ^ ...
> @@ -599,7 +624,6 @@ int vm_event_domctl(struct domain *d, xen_domctl_vm_event_op_t *vec,
> #ifdef CONFIG_HAS_MEM_PAGING
> case XEN_DOMCTL_VM_EVENT_OP_PAGING:
> {
> - struct vm_event_domain *ved = &d->vm_event->paging;
> rc = -EINVAL;
>
> switch( vec->op )
> @@ -629,24 +653,28 @@ int vm_event_domctl(struct domain *d, xen_domctl_vm_event_op_t *vec,
> break;
>
> /* domain_pause() not required here, see XSA-99 */
> - rc = vm_event_enable(d, vec, ved, _VPF_mem_paging,
> + rc = vm_event_enable(d, vec, &d->vm_event_paging, _VPF_mem_paging,
> HVM_PARAM_PAGING_RING_PFN,
> mem_paging_notification);
> }
> break;
>
> case XEN_VM_EVENT_DISABLE:
> - if ( ved->ring_page )
> + if ( !d->vm_event_paging )
> + break;
> + if ( d->vm_event_paging->ring_page )
... and here ^ ...
> {
> domain_pause(d);
> - rc = vm_event_disable(d, ved);
> + rc = vm_event_disable(d, &d->vm_event_paging);
> domain_unpause(d);
> }
> break;
>
> case XEN_VM_EVENT_RESUME:
> - if ( ved->ring_page )
> - vm_event_resume(d, ved);
> + if ( !d->vm_event_paging )
> + break;
> + if ( d->vm_event_paging->ring_page )
> + vm_event_resume(d, d->vm_event_paging);
... and here ^ ...
> @@ -671,24 +698,28 @@ int vm_event_domctl(struct domain *d, xen_domctl_vm_event_op_t *vec,
> rc = arch_monitor_init_domain(d);
> if ( rc )
> break;
> - rc = vm_event_enable(d, vec, ved, _VPF_mem_access,
> + rc = vm_event_enable(d, vec, &d->vm_event_monitor, _VPF_mem_access,
> HVM_PARAM_MONITOR_RING_PFN,
> monitor_notification);
> break;
>
> case XEN_VM_EVENT_DISABLE:
> - if ( ved->ring_page )
> + if ( !d->vm_event_monitor )
> + break;
> + if ( d->vm_event_monitor->ring_page )
... and here ^ ...
> {
> domain_pause(d);
> - rc = vm_event_disable(d, ved);
> + rc = vm_event_disable(d, &d->vm_event_monitor);
> arch_monitor_cleanup_domain(d);
> domain_unpause(d);
> }
> break;
>
> case XEN_VM_EVENT_RESUME:
> - if ( ved->ring_page )
> - vm_event_resume(d, ved);
> + if ( !d->vm_event_monitor )
> + break;
> + if ( d->vm_event_monitor->ring_page )
> + vm_event_resume(d, d->vm_event_monitor);
... and here ^ ...
> @@ -720,23 +750,27 @@ int vm_event_domctl(struct domain *d, xen_domctl_vm_event_op_t *vec,
> break;
>
> /* domain_pause() not required here, see XSA-99 */
> - rc = vm_event_enable(d, vec, ved, _VPF_mem_sharing,
> + rc = vm_event_enable(d, vec, &d->vm_event_share, _VPF_mem_sharing,
> HVM_PARAM_SHARING_RING_PFN,
> mem_sharing_notification);
> break;
>
> case XEN_VM_EVENT_DISABLE:
> - if ( ved->ring_page )
> + if ( !d->vm_event_share )
> + break;
> + if ( d->vm_event_share->ring_page )
... and here ^ ... and in any other place I may have missed.
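With the consolidated check in place, the DISABLE/RESUME arms above
could then collapse to something like this (again just a sketch,
untested):

    case XEN_VM_EVENT_DISABLE:
        /* vm_event_check_ring() would now cover both the NULL
         * pointer and the missing ring page. */
        if ( vm_event_check_ring(d->vm_event_paging) )
        {
            domain_pause(d);
            rc = vm_event_disable(d, &d->vm_event_paging);
            domain_unpause(d);
        }
        break;

    case XEN_VM_EVENT_RESUME:
        if ( vm_event_check_ring(d->vm_event_paging) )
            vm_event_resume(d, d->vm_event_paging);
        break;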
Thanks,
Tamas
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel