
[Xen-changelog] [xen master] xen/vm-event: Misc fixups



commit 2828258c4dcae6a7871e6d627d85ad4ba9680d3e
Author:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
AuthorDate: Fri May 31 12:54:28 2019 -0700
Commit:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CommitDate: Tue Jun 4 14:43:51 2019 +0100

    xen/vm-event: Misc fixups
    
     * Drop redundant brackets and inline qualifiers.
     * Insert newlines and spaces where appropriate.
     * Drop the redundant #ifndef NDEBUG - gdprintk() is already conditional.
       Fix the logging level, as gdprintk() already prefixes the guest marker.
    
    No functional change.
    
    Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    Acked-by: Razvan Cojocaru <rcojocaru@xxxxxxxxxxxxxxx>
---
 xen/common/vm_event.c | 21 ++++++++++++---------
 1 file changed, 12 insertions(+), 9 deletions(-)
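
A note on the NDEBUG/logging-level bullet above, for readers not steeped in
Xen's logging helpers: gdprintk() only expands to a real printk() in debug
builds, and it already stamps the guest marker onto the message itself, which
is why both the open-coded #ifndef NDEBUG guard and the XENLOG_G_* level were
redundant at this call site.  A minimal sketch of that arrangement -
paraphrased, not the exact text of xen/include/xen/lib.h:

    /* Sketch only; the real macro also tags the message with file/line/vCPU context. */
    #ifndef NDEBUG
    /* Debug builds: emit a guest-tagged message at the given level. */
    #define gdprintk(lvl, fmt, args...) \
        printk(XENLOG_GUEST lvl fmt, ## args)
    #else
    /* Release builds: the call compiles away entirely. */
    #define gdprintk(lvl, fmt, args...) ((void)0)
    #endif

    /*
     * XENLOG_G_WARNING is the guest-prefixed form of XENLOG_WARNING, so
     * passing it to gdprintk() would apply the guest marker twice; plain
     * XENLOG_WARNING is the appropriate level here.
     */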

diff --git a/xen/common/vm_event.c b/xen/common/vm_event.c
index 72f42b408a..e8726805e7 100644
--- a/xen/common/vm_event.c
+++ b/xen/common/vm_event.c
@@ -102,6 +102,7 @@ static int vm_event_enable(
 static unsigned int vm_event_ring_available(struct vm_event_domain *ved)
 {
     int avail_req = RING_FREE_REQUESTS(&ved->front_ring);
+
     avail_req -= ved->target_producers;
     avail_req -= ved->foreign_producers;
 
@@ -168,7 +169,7 @@ static void vm_event_wake_queued(struct domain *d, struct vm_event_domain *ved)
  */
 void vm_event_wake(struct domain *d, struct vm_event_domain *ved)
 {
-    if (!list_empty(&ved->wq.list))
+    if ( !list_empty(&ved->wq.list) )
         vm_event_wake_queued(d, ved);
     else
         vm_event_wake_blocked(d, ved);
@@ -216,8 +217,8 @@ static int vm_event_disable(struct domain *d, struct vm_event_domain **p_ved)
     return 0;
 }
 
-static inline void vm_event_release_slot(struct domain *d,
-                                         struct vm_event_domain *ved)
+static void vm_event_release_slot(struct domain *d,
+                                  struct vm_event_domain *ved)
 {
     /* Update the accounting */
     if ( current->domain == d )
@@ -258,17 +259,16 @@ void vm_event_put_request(struct domain *d,
     RING_IDX req_prod;
     struct vcpu *curr = current;
 
-    if( !vm_event_check_ring(ved))
+    if( !vm_event_check_ring(ved) )
         return;
 
     if ( curr->domain != d )
     {
         req->flags |= VM_EVENT_FLAG_FOREIGN;
-#ifndef NDEBUG
+
         if ( !(req->flags & VM_EVENT_FLAG_VCPU_PAUSED) )
-            gdprintk(XENLOG_G_WARNING, "d%dv%d was not paused.\n",
+            gdprintk(XENLOG_WARNING, "d%dv%d was not paused.\n",
                      d->domain_id, req->vcpu_id);
-#endif
     }
 
     req->version = VM_EVENT_INTERFACE_VERSION;
@@ -474,6 +474,7 @@ static int vm_event_grab_slot(struct vm_event_domain *ved, int foreign)
 static int vm_event_wait_try_grab(struct vm_event_domain *ved, int *rc)
 {
     *rc = vm_event_grab_slot(ved, 0);
+
     return *rc;
 }
 
@@ -481,13 +482,15 @@ static int vm_event_wait_try_grab(struct vm_event_domain *ved, int *rc)
 static int vm_event_wait_slot(struct vm_event_domain *ved)
 {
     int rc = -EBUSY;
+
     wait_event(ved->wq, vm_event_wait_try_grab(ved, &rc) != -EBUSY);
+
     return rc;
 }
 
 bool vm_event_check_ring(struct vm_event_domain *ved)
 {
-    return (ved && ved->ring_page);
+    return ved && ved->ring_page;
 }
 
 /*
@@ -511,7 +514,7 @@ int __vm_event_claim_slot(struct domain *d, struct vm_event_domain *ved,
     if ( (current->domain == d) && allow_sleep )
         return vm_event_wait_slot(ved);
     else
-        return vm_event_grab_slot(ved, (current->domain != d));
+        return vm_event_grab_slot(ved, current->domain != d);
 }
 
 #ifdef CONFIG_HAS_MEM_PAGING
--
generated by git-patchbot for /home/xen/git/xen.git#master


 

