
[Xen-devel] [PATCH 5/9] vm_event: Simplify vm_event interface



The domain reference can be kept in the vm_event_domain structure itself,
because every vm_event interface function receives both a vm_event_domain
and its corresponding domain as parameters; the separate domain argument is
therefore redundant and is dropped (a before/after sketch follows the list
below).

Affected functions:
- __vm_event_claim_slot / vm_event_claim_slot / vm_event_claim_slot_nosleep
- vm_event_cancel_slot
- vm_event_put_request
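
For review context only (not part of the patch): the change shrinks every
call site by one argument. A minimal before/after sketch, modelled on the
xen/common/monitor.c hunk below:

    /* Before: ring and owning domain travel together, and may disagree. */
    rc = vm_event_claim_slot(d, d->vm_event_monitor);

    /* After: the ring alone suffices; it carries its owner in ved->d. */
    rc = vm_event_claim_slot(d->vm_event_monitor);

This relies on ved->d being initialized when the ring is enabled; the hunk
doing that (presumably in vm_event_enable()) is not among those quoted below.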

Signed-off-by: Petre Pircalabu <ppircalabu@xxxxxxxxxxxxxxx>
---
 xen/arch/x86/mm/mem_sharing.c |  5 ++---
 xen/arch/x86/mm/p2m.c         | 11 +++++------
 xen/common/monitor.c          |  4 ++--
 xen/common/vm_event.c         | 37 ++++++++++++++++++-------------------
 xen/include/xen/sched.h       |  2 ++
 xen/include/xen/vm_event.h    | 17 +++++++----------
 6 files changed, 36 insertions(+), 40 deletions(-)
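
As a standalone illustration of the pattern (toy code; every name below is
invented for the sketch and nothing here is Xen source):

    #include <stdio.h>

    struct domain { int domain_id; };

    /* Toy stand-in for struct vm_event_domain: the ring remembers its
     * owner, so interface functions no longer need a domain argument. */
    struct toy_ring {
        struct domain *d;        /* back-pointer, set once at setup time */
        unsigned int free_slots; /* stand-in for the real ring accounting */
    };

    /* Old shape: toy_claim_slot(struct domain *d, struct toy_ring *r).
     * New shape: the domain argument is gone, so callers can no longer
     * pass a (domain, ring) pair that disagrees. */
    static int toy_claim_slot(struct toy_ring *r)
    {
        if ( !r->free_slots )
            return -1;
        r->free_slots--;
        printf("slot claimed on d%d's ring\n", r->d->domain_id);
        return 0;
    }

    int main(void)
    {
        struct domain dom = { .domain_id = 1 };
        struct toy_ring ring = { .d = &dom, .free_slots = 64 };
        return toy_claim_slot(&ring) ? 1 : 0;
    }

Besides shortening the signatures, the back-pointer removes a class of
caller mistakes where the domain passed in did not match the ring's owner.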

diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c
index f16a3f5..9d80389 100644
--- a/xen/arch/x86/mm/mem_sharing.c
+++ b/xen/arch/x86/mm/mem_sharing.c
@@ -557,8 +557,7 @@ int mem_sharing_notify_enomem(struct domain *d, unsigned long gfn,
         .u.mem_sharing.p2mt = p2m_ram_shared
     };
 
-    if ( (rc = __vm_event_claim_slot(d, 
-                        d->vm_event_share, allow_sleep)) < 0 )
+    if ( (rc = __vm_event_claim_slot(d->vm_event_share, allow_sleep)) < 0 )
         return rc;
 
     if ( v->domain == d )
@@ -567,7 +566,7 @@ int mem_sharing_notify_enomem(struct domain *d, unsigned long gfn,
         vm_event_vcpu_pause(v);
     }
 
-    vm_event_put_request(d, d->vm_event_share, &req);
+    vm_event_put_request(d->vm_event_share, &req);
 
     return 0;
 }
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 4c99548..625fc9b 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -1652,7 +1652,7 @@ void p2m_mem_paging_drop_page(struct domain *d, unsigned long gfn,
      * correctness of the guest execution at this point.  If this is the only
      * page that happens to be paged-out, we'll be okay..  but it's likely the
      * guest will crash shortly anyways. */
-    int rc = vm_event_claim_slot(d, d->vm_event_paging);
+    int rc = vm_event_claim_slot(d->vm_event_paging);
     if ( rc < 0 )
         return;
 
@@ -1666,7 +1666,7 @@ void p2m_mem_paging_drop_page(struct domain *d, unsigned long gfn,
         /* Evict will fail now, tag this request for pager */
         req.u.mem_paging.flags |= MEM_PAGING_EVICT_FAIL;
 
-    vm_event_put_request(d, d->vm_event_paging, &req);
+    vm_event_put_request(d->vm_event_paging, &req);
 }
 
 /**
@@ -1704,8 +1704,7 @@ void p2m_mem_paging_populate(struct domain *d, unsigned long gfn_l)
     struct p2m_domain *p2m = p2m_get_hostp2m(d);
 
     /* We're paging. There should be a ring */
-    int rc = vm_event_claim_slot(d, d->vm_event_paging);
-
+    int rc = vm_event_claim_slot(d->vm_event_paging);
     if ( rc == -EOPNOTSUPP )
     {
         gdprintk(XENLOG_ERR, "Domain %hu paging gfn %lx yet no ring "
@@ -1746,7 +1745,7 @@ void p2m_mem_paging_populate(struct domain *d, unsigned long gfn_l)
     {
         /* gfn is already on its way back and vcpu is not paused */
     out_cancel:
-        vm_event_cancel_slot(d, d->vm_event_paging);
+        vm_event_cancel_slot(d->vm_event_paging);
         return;
     }
 
@@ -1754,7 +1753,7 @@ void p2m_mem_paging_populate(struct domain *d, unsigned long gfn_l)
     req.u.mem_paging.p2mt = p2mt;
     req.vcpu_id = v->vcpu_id;
 
-    vm_event_put_request(d, d->vm_event_paging, &req);
+    vm_event_put_request(d->vm_event_paging, &req);
 }
 
 /**
diff --git a/xen/common/monitor.c b/xen/common/monitor.c
index d5c9ff1..b8d33c4 100644
--- a/xen/common/monitor.c
+++ b/xen/common/monitor.c
@@ -93,7 +93,7 @@ int monitor_traps(struct vcpu *v, bool sync, vm_event_request_t *req)
     int rc;
     struct domain *d = v->domain;
 
-    rc = vm_event_claim_slot(d, d->vm_event_monitor);
+    rc = vm_event_claim_slot(d->vm_event_monitor);
     switch ( rc )
     {
     case 0:
@@ -125,7 +125,7 @@ int monitor_traps(struct vcpu *v, bool sync, vm_event_request_t *req)
     }
 
     vm_event_fill_regs(req);
-    vm_event_put_request(d, d->vm_event_monitor, req);
+    vm_event_put_request(d->vm_event_monitor, req);
 
     return rc;
 }
diff --git a/xen/common/vm_event.c b/xen/common/vm_event.c
index 1dd3e48..3e87bbc 100644
--- a/xen/common/vm_event.c
+++ b/xen/common/vm_event.c
@@ -131,10 +131,11 @@ static unsigned int vm_event_ring_available(struct vm_event_domain *ved)
  * but need to be resumed where the ring is capable of processing at least
  * one event from them.
  */
-static void vm_event_wake_blocked(struct domain *d, struct vm_event_domain *ved)
+static void vm_event_wake_blocked(struct vm_event_domain *ved)
 {
     struct vcpu *v;
     unsigned int avail_req = vm_event_ring_available(ved);
+    struct domain *d = ved->d;
 
     if ( avail_req == 0 || ved->blocked == 0 )
         return;
@@ -171,7 +172,7 @@ static void vm_event_wake_blocked(struct domain *d, struct vm_event_domain *ved)
  * was unable to do so, it is queued on a wait queue.  These are woken as
  * needed, and take precedence over the blocked vCPUs.
  */
-static void vm_event_wake_queued(struct domain *d, struct vm_event_domain *ved)
+static void vm_event_wake_queued(struct vm_event_domain *ved)
 {
     unsigned int avail_req = vm_event_ring_available(ved);
 
@@ -186,12 +187,12 @@ static void vm_event_wake_queued(struct domain *d, struct vm_event_domain *ved)
  * call vm_event_wake() again, ensuring that any blocked vCPUs will get
  * unpaused once all the queued vCPUs have made it through.
  */
-void vm_event_wake(struct domain *d, struct vm_event_domain *ved)
+void vm_event_wake(struct vm_event_domain *ved)
 {
     if (!list_empty(&ved->wq.list))
-        vm_event_wake_queued(d, ved);
+        vm_event_wake_queued(ved);
     else
-        vm_event_wake_blocked(d, ved);
+        vm_event_wake_blocked(ved);
 }
 
 static int vm_event_disable(struct domain *d, struct vm_event_domain **ved)
@@ -235,17 +236,16 @@ static int vm_event_disable(struct domain *d, struct vm_event_domain **ved)
     return 0;
 }
 
-static inline void vm_event_release_slot(struct domain *d,
-                                         struct vm_event_domain *ved)
+static inline void vm_event_release_slot(struct vm_event_domain *ved)
 {
     /* Update the accounting */
-    if ( current->domain == d )
+    if ( current->domain == ved->d )
         ved->target_producers--;
     else
         ved->foreign_producers--;
 
     /* Kick any waiters */
-    vm_event_wake(d, ved);
+    vm_event_wake(ved);
 }
 
 /*
@@ -267,8 +267,7 @@ static void vm_event_mark_and_pause(struct vcpu *v, struct vm_event_domain *ved)
  * overly full and its continued execution would cause stalling and excessive
  * waiting.  The vCPU will be automatically unpaused when the ring clears.
  */
-void vm_event_put_request(struct domain *d,
-                          struct vm_event_domain *ved,
+void vm_event_put_request(struct vm_event_domain *ved,
                           vm_event_request_t *req)
 {
     vm_event_front_ring_t *front_ring;
@@ -276,6 +275,7 @@ void vm_event_put_request(struct domain *d,
     unsigned int avail_req;
     RING_IDX req_prod;
     struct vcpu *curr = current;
+    struct domain *d = ved->d;
 
     if( !vm_event_check(ved))
         return;
@@ -309,7 +309,7 @@ void vm_event_put_request(struct domain *d,
     RING_PUSH_REQUESTS(front_ring);
 
     /* We've actually *used* our reservation, so release the slot. */
-    vm_event_release_slot(d, ved);
+    vm_event_release_slot(ved);
 
     /* Give this vCPU a black eye if necessary, on the way out.
      * See the comments above wake_blocked() for more information
@@ -351,7 +351,7 @@ static int vm_event_get_response(struct domain *d, struct vm_event_domain *ved,
 
     /* Kick any waiters -- since we've just consumed an event,
      * there may be additional space available in the ring. */
-    vm_event_wake(d, ved);
+    vm_event_wake(ved);
 
     vm_event_ring_unlock(ved);
 
@@ -450,13 +450,13 @@ static int vm_event_resume(struct domain *d, struct vm_event_domain *ved)
     return 0;
 }
 
-void vm_event_cancel_slot(struct domain *d, struct vm_event_domain *ved)
+void vm_event_cancel_slot(struct vm_event_domain *ved)
 {
     if( !vm_event_check(ved) )
         return;
 
     vm_event_ring_lock(ved);
-    vm_event_release_slot(d, ved);
+    vm_event_release_slot(ved);
     vm_event_ring_unlock(ved);
 }
 
@@ -518,16 +518,15 @@ bool vm_event_check(struct vm_event_domain *ved)
  *               0: a spot has been reserved
  *
  */
-int __vm_event_claim_slot(struct domain *d, struct vm_event_domain *ved,
-                          bool allow_sleep)
+int __vm_event_claim_slot(struct vm_event_domain *ved, bool allow_sleep)
 {
     if ( !vm_event_check(ved) )
         return -EOPNOTSUPP;
 
-    if ( (current->domain == d) && allow_sleep )
+    if ( (current->domain == ved->d) && allow_sleep )
         return vm_event_wait_slot(ved);
     else
-        return vm_event_grab_slot(ved, (current->domain != d));
+        return vm_event_grab_slot(ved, (current->domain != ved->d));
 }
 
 #ifdef CONFIG_HAS_MEM_PAGING
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index 2201fac..7dee022 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -282,6 +282,8 @@ struct vcpu
 /* VM event */
 struct vm_event_domain
 {
+    /* Domain reference */
+    struct domain *d;
     /* ring lock */
     spinlock_t ring_lock;
     /* The ring has 64 entries */
diff --git a/xen/include/xen/vm_event.h b/xen/include/xen/vm_event.h
index 0a05e5b..a5c82d6 100644
--- a/xen/include/xen/vm_event.h
+++ b/xen/include/xen/vm_event.h
@@ -45,23 +45,20 @@ bool vm_event_check(struct vm_event_domain *ved);
  * cancel_slot(), both of which are guaranteed to
  * succeed.
  */
-int __vm_event_claim_slot(struct domain *d, struct vm_event_domain *ved,
-                          bool allow_sleep);
-static inline int vm_event_claim_slot(struct domain *d,
-                                      struct vm_event_domain *ved)
+int __vm_event_claim_slot(struct vm_event_domain *ved, bool allow_sleep);
+static inline int vm_event_claim_slot(struct vm_event_domain *ved)
 {
-    return __vm_event_claim_slot(d, ved, true);
+    return __vm_event_claim_slot(ved, true);
 }
 
-static inline int vm_event_claim_slot_nosleep(struct domain *d,
-                                              struct vm_event_domain *ved)
+static inline int vm_event_claim_slot_nosleep(struct vm_event_domain *ved)
 {
-    return __vm_event_claim_slot(d, ved, false);
+    return __vm_event_claim_slot(ved, false);
 }
 
-void vm_event_cancel_slot(struct domain *d, struct vm_event_domain *ved);
+void vm_event_cancel_slot(struct vm_event_domain *ved);
 
-void vm_event_put_request(struct domain *d, struct vm_event_domain *ved,
+void vm_event_put_request(struct vm_event_domain *ved,
                           vm_event_request_t *req);
 
 int vm_event_domctl(struct domain *d, struct xen_domctl_vm_event_op *vec,
-- 
2.7.4

