[Xen-devel] [PATCH v2 02/10] vm_event: Remove "ring" suffix from vm_event_check_ring
Decouple the interface from the implementation so that vm_event_check can be
used regardless of the underlying vm_event implementation.
Signed-off-by: Petre Pircalabu <ppircalabu@xxxxxxxxxxxxxxx>
Acked-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Acked-by: Tamas K Lengyel <tamas@xxxxxxxxxxxxx>
---
xen/arch/arm/mem_access.c | 2 +-
xen/arch/x86/mm/mem_access.c | 4 ++--
xen/arch/x86/mm/mem_paging.c | 2 +-
xen/common/mem_access.c | 2 +-
xen/common/vm_event.c | 24 ++++++++++++------------
xen/drivers/passthrough/pci.c | 2 +-
xen/include/xen/vm_event.h | 4 ++--
7 files changed, 20 insertions(+), 20 deletions(-)
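
[Context-only sketch, not part of the patch: a simplified stand-in for Xen's
struct vm_event_domain, illustrating what the renamed helper tests and the
caller pattern the rename preserves. Names and the main() driver below are
hypothetical; only vm_event_check()'s body mirrors the patch.]

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct vm_event_domain {
    void *ring_page;              /* shared ring page; NULL until enabled */
    /* ... lock, event channel port, wait queue in the real structure ... */
};

/* Same body as vm_event_check() in this patch: the name no longer says
 * "ring", but today the test is still "has a ring page been set up?". */
static bool vm_event_check(struct vm_event_domain *ved)
{
    return ved && ved->ring_page;
}

/* Typical caller, mirroring p2m_mem_access_check(): bail out early when
 * no vm_event listener is subscribed. */
static void handle_access_fault(struct vm_event_domain *monitor)
{
    if ( !vm_event_check(monitor) )
    {
        printf("no listener, resolve locally\n");
        return;
    }
    printf("forward request to the monitor subscriber\n");
}

int main(void)
{
    char page[4096];
    struct vm_event_domain disabled = { .ring_page = NULL };
    struct vm_event_domain enabled  = { .ring_page = page };

    handle_access_fault(&disabled);   /* "no listener, resolve locally" */
    handle_access_fault(&enabled);    /* "forward request ..." */
    return 0;
}

Because every caller now only asks "is a vm_event subscriber set up?", the
ring-specific body of vm_event_check() can later be replaced without touching
the call sites changed below, which is the stated point of the rename.
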
diff --git a/xen/arch/arm/mem_access.c b/xen/arch/arm/mem_access.c
index 3e36202..d54760b 100644
--- a/xen/arch/arm/mem_access.c
+++ b/xen/arch/arm/mem_access.c
@@ -290,7 +290,7 @@ bool p2m_mem_access_check(paddr_t gpa, vaddr_t gla, const struct npfec npfec)
}
/* Otherwise, check if there is a vm_event monitor subscriber */
- if ( !vm_event_check_ring(v->domain->vm_event_monitor) )
+ if ( !vm_event_check(v->domain->vm_event_monitor) )
{
/* No listener */
if ( p2m->access_required )
diff --git a/xen/arch/x86/mm/mem_access.c b/xen/arch/x86/mm/mem_access.c
index 0144f92..640352e 100644
--- a/xen/arch/x86/mm/mem_access.c
+++ b/xen/arch/x86/mm/mem_access.c
@@ -182,7 +182,7 @@ bool p2m_mem_access_check(paddr_t gpa, unsigned long gla,
gfn_unlock(p2m, gfn, 0);
/* Otherwise, check if there is a memory event listener, and send the message along */
- if ( !vm_event_check_ring(d->vm_event_monitor) || !req_ptr )
+ if ( !vm_event_check(d->vm_event_monitor) || !req_ptr )
{
/* No listener */
if ( p2m->access_required )
@@ -210,7 +210,7 @@ bool p2m_mem_access_check(paddr_t gpa, unsigned long gla,
return true;
}
}
- if ( vm_event_check_ring(d->vm_event_monitor) &&
+ if ( vm_event_check(d->vm_event_monitor) &&
d->arch.monitor.inguest_pagefault_disabled &&
npfec.kind != npfec_kind_with_gla ) /* don't send a mem_event */
{
diff --git a/xen/arch/x86/mm/mem_paging.c b/xen/arch/x86/mm/mem_paging.c
index 54a94fa..dc2a59a 100644
--- a/xen/arch/x86/mm/mem_paging.c
+++ b/xen/arch/x86/mm/mem_paging.c
@@ -44,7 +44,7 @@ int mem_paging_memop(XEN_GUEST_HANDLE_PARAM(xen_mem_paging_op_t) arg)
goto out;
rc = -ENODEV;
- if ( unlikely(!vm_event_check_ring(d->vm_event_paging)) )
+ if ( unlikely(!vm_event_check(d->vm_event_paging)) )
goto out;
switch( mpo.op )
diff --git a/xen/common/mem_access.c b/xen/common/mem_access.c
index 010e6f8..51e4e2b 100644
--- a/xen/common/mem_access.c
+++ b/xen/common/mem_access.c
@@ -52,7 +52,7 @@ int mem_access_memop(unsigned long cmd,
goto out;
rc = -ENODEV;
- if ( unlikely(!vm_event_check_ring(d->vm_event_monitor)) )
+ if ( unlikely(!vm_event_check(d->vm_event_monitor)) )
goto out;
switch ( mao.op )
diff --git a/xen/common/vm_event.c b/xen/common/vm_event.c
index 56b506a..515a917 100644
--- a/xen/common/vm_event.c
+++ b/xen/common/vm_event.c
@@ -179,7 +179,7 @@ static int vm_event_disable(struct domain *d, struct vm_event_domain **p_ved)
{
struct vm_event_domain *ved = *p_ved;
- if ( vm_event_check_ring(ved) )
+ if ( vm_event_check(ved) )
{
struct vcpu *v;
@@ -259,7 +259,7 @@ void vm_event_put_request(struct domain *d,
RING_IDX req_prod;
struct vcpu *curr = current;
- if( !vm_event_check_ring(ved) )
+ if( !vm_event_check(ved) )
return;
if ( curr->domain != d )
@@ -362,7 +362,7 @@ static int vm_event_resume(struct domain *d, struct vm_event_domain *ved)
*/
ASSERT(d != current->domain);
- if ( unlikely(!vm_event_check_ring(ved)) )
+ if ( unlikely(!vm_event_check(ved)) )
return -ENODEV;
/* Pull all responses off the ring. */
@@ -433,7 +433,7 @@ static int vm_event_resume(struct domain *d, struct vm_event_domain *ved)
void vm_event_cancel_slot(struct domain *d, struct vm_event_domain *ved)
{
- if( !vm_event_check_ring(ved) )
+ if( !vm_event_check(ved) )
return;
spin_lock(&ved->lock);
@@ -488,7 +488,7 @@ static int vm_event_wait_slot(struct vm_event_domain *ved)
return rc;
}
-bool vm_event_check_ring(struct vm_event_domain *ved)
+bool vm_event_check(struct vm_event_domain *ved)
{
return ved && ved->ring_page;
}
@@ -508,7 +508,7 @@ bool vm_event_check_ring(struct vm_event_domain *ved)
int __vm_event_claim_slot(struct domain *d, struct vm_event_domain *ved,
bool allow_sleep)
{
- if ( !vm_event_check_ring(ved) )
+ if ( !vm_event_check(ved) )
return -EOPNOTSUPP;
if ( (current->domain == d) && allow_sleep )
@@ -543,7 +543,7 @@ static void mem_sharing_notification(struct vcpu *v, unsigned int port)
void vm_event_cleanup(struct domain *d)
{
#ifdef CONFIG_HAS_MEM_PAGING
- if ( vm_event_check_ring(d->vm_event_paging) )
+ if ( vm_event_check(d->vm_event_paging) )
{
/* Destroying the wait queue head means waking up all
* queued vcpus. This will drain the list, allowing
@@ -556,13 +556,13 @@ void vm_event_cleanup(struct domain *d)
(void)vm_event_disable(d, &d->vm_event_paging);
}
#endif
- if ( vm_event_check_ring(d->vm_event_monitor) )
+ if ( vm_event_check(d->vm_event_monitor) )
{
destroy_waitqueue_head(&d->vm_event_monitor->wq);
(void)vm_event_disable(d, &d->vm_event_monitor);
}
#ifdef CONFIG_HAS_MEM_SHARING
- if ( vm_event_check_ring(d->vm_event_share) )
+ if ( vm_event_check(d->vm_event_share) )
{
destroy_waitqueue_head(&d->vm_event_share->wq);
(void)vm_event_disable(d, &d->vm_event_share);
@@ -646,7 +646,7 @@ int vm_event_domctl(struct domain *d, struct xen_domctl_vm_event_op *vec)
break;
case XEN_VM_EVENT_DISABLE:
- if ( vm_event_check_ring(d->vm_event_paging) )
+ if ( vm_event_check(d->vm_event_paging) )
{
domain_pause(d);
rc = vm_event_disable(d, &d->vm_event_paging);
@@ -683,7 +683,7 @@ int vm_event_domctl(struct domain *d, struct xen_domctl_vm_event_op *vec)
break;
case XEN_VM_EVENT_DISABLE:
- if ( vm_event_check_ring(d->vm_event_monitor) )
+ if ( vm_event_check(d->vm_event_monitor) )
{
domain_pause(d);
rc = vm_event_disable(d, &d->vm_event_monitor);
@@ -728,7 +728,7 @@ int vm_event_domctl(struct domain *d, struct xen_domctl_vm_event_op *vec)
break;
case XEN_VM_EVENT_DISABLE:
- if ( vm_event_check_ring(d->vm_event_share) )
+ if ( vm_event_check(d->vm_event_share) )
{
domain_pause(d);
rc = vm_event_disable(d, &d->vm_event_share);
diff --git a/xen/drivers/passthrough/pci.c b/xen/drivers/passthrough/pci.c
index e886894..eec7686 100644
--- a/xen/drivers/passthrough/pci.c
+++ b/xen/drivers/passthrough/pci.c
@@ -1451,7 +1451,7 @@ static int assign_device(struct domain *d, u16 seg, u8 bus, u8 devfn, u32 flag)
/* Prevent device assign if mem paging or mem sharing have been
* enabled for this domain */
if ( unlikely(d->arch.hvm.mem_sharing_enabled ||
- vm_event_check_ring(d->vm_event_paging) ||
+ vm_event_check(d->vm_event_paging) ||
p2m_get_hostp2m(d)->global_logdirty) )
return -EXDEV;
diff --git a/xen/include/xen/vm_event.h b/xen/include/xen/vm_event.h
index 3cc2b20..381be0b 100644
--- a/xen/include/xen/vm_event.h
+++ b/xen/include/xen/vm_event.h
@@ -29,8 +29,8 @@
/* Clean up on domain destruction */
void vm_event_cleanup(struct domain *d);
-/* Returns whether a ring has been set up */
-bool vm_event_check_ring(struct vm_event_domain *ved);
+/* Returns whether the VM event domain has been set up */
+bool vm_event_check(struct vm_event_domain *ved);
/* Returns 0 on success, -ENOSYS if there is no ring, -EBUSY if there is no
* available space and the caller is a foreign domain. If the guest itself
--
2.7.4