
[Xen-changelog] [xen master] domctl: cleanup



commit 5da92f7ea0136c412281275c0d282044b45e4872
Author:     Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Wed Mar 4 10:02:50 2015 +0100
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Wed Mar 4 10:02:50 2015 +0100

    domctl: cleanup
    
    - drop redundant "ret = 0" statements
    - drop unnecessary braces
    - eliminate a few single use local variables
    - move break statements inside case-specific braced scopes
    - eliminate trailing whitespace
    
    Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
    Acked-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
    Reviewed-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
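[Editorial note, not part of the commit: the sketch below is a minimal, stand-alone illustration of the switch-statement style this cleanup converges on. The names used (handle_op, OP_A, OP_B, arg) are invented for illustration and do not appear in Xen; only the stylistic points mirror the patch: a single default "ret = 0" instead of per-case assignments, braces kept only where a case declares a local variable, and the break statement placed inside that case's braced scope.]

#include <stdio.h>

enum op { OP_A, OP_B, OP_UNKNOWN };

static long handle_op(enum op op, int arg)
{
    long ret = 0;             /* default success; no per-case "ret = 0" needed */

    switch ( op )
    {
    case OP_A:                /* single statement: no braces required */
        printf("A: %d\n", arg);
        break;

    case OP_B:
    {
        int scaled = arg * 2; /* local variable needs a scope, so keep braces */

        printf("B: %d\n", scaled);
        break;                /* break sits inside the case-specific scope */
    }

    default:
        ret = -1;             /* only failure paths touch ret */
        break;
    }

    return ret;
}

int main(void)
{
    printf("ret=%ld\n", handle_op(OP_B, 21));
    return 0;
}

[The design choice shown here is the same one the diff applies throughout do_domctl(): braces survive only where a case needs its own declarations, and everything else collapses to "statement(s); break;".]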
 xen/common/domctl.c |  143 ++++++++++++++++++--------------------------------
 1 files changed, 52 insertions(+), 91 deletions(-)

diff --git a/xen/common/domctl.c b/xen/common/domctl.c
index df68734..d396cc4 100644
--- a/xen/common/domctl.c
+++ b/xen/common/domctl.c
@@ -1,8 +1,8 @@
 /******************************************************************************
  * domctl.c
- * 
+ *
  * Domain management operations. For use by node control stack.
- * 
+ *
  * Copyright (c) 2002-2006, K A Fraser
  */
 
@@ -154,13 +154,13 @@ void getdomaininfo(struct domain *d, struct xen_domctl_getdomaininfo *info)
     u64 cpu_time = 0;
     int flags = XEN_DOMINF_blocked;
     struct vcpu_runstate_info runstate;
-    
+
     info->domain = d->domain_id;
     info->max_vcpu_id = XEN_INVALID_MAX_VCPU_ID;
     info->nr_online_vcpus = 0;
     info->ssidref = 0;
-    
-    /* 
+
+    /*
      * - domain is marked as blocked only if all its vcpus are blocked
      * - domain is marked as running if any of its vcpus is running
      */
@@ -237,7 +237,7 @@ static unsigned int default_vcpu0_location(cpumask_t *online)
     }
 
     /*
-     * If we're on a HT system, we only auto-allocate to a non-primary HT. We 
+     * If we're on a HT system, we only auto-allocate to a non-primary HT. We
      * favour high numbered CPUs in the event of a tie.
      */
     cpumask_copy(&cpu_exclude_map, per_cpu(cpu_sibling_mask, 0));
@@ -517,8 +517,8 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
         }
 
         free_vcpu_guest_context(c.nat);
+        break;
     }
-    break;
 
     case XEN_DOMCTL_pausedomain:
         ret = -EINVAL;
@@ -531,11 +531,8 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
         break;
 
     case XEN_DOMCTL_resumedomain:
-    {
         domain_resume(d);
-        ret = 0;
-    }
-    break;
+        break;
 
     case XEN_DOMCTL_createdomain:
     {
@@ -608,8 +605,8 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
         op->domain = d->domain_id;
         copyback = 1;
         d = NULL;
+        break;
     }
-    break;
 
     case XEN_DOMCTL_max_vcpus:
     {
@@ -698,8 +695,8 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
 
     maxvcpu_out_novcpulock:
         domain_unpause(d);
+        break;
     }
-    break;
 
     case XEN_DOMCTL_destroydomain:
         ret = domain_kill(d);
@@ -716,14 +713,13 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
                                         &op->u.nodeaffinity.nodemap);
         if ( !ret )
             ret = domain_set_node_affinity(d, &new_affinity);
+        break;
     }
-    break;
+
     case XEN_DOMCTL_getnodeaffinity:
-    {
         ret = nodemask_to_xenctl_bitmap(&op->u.nodeaffinity.nodemap,
                                         &d->node_affinity);
-    }
-    break;
+        break;
 
     case XEN_DOMCTL_setvcpuaffinity:
     case XEN_DOMCTL_getvcpuaffinity:
@@ -832,18 +828,16 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
                 ret = cpumask_to_xenctl_bitmap(&vcpuaff->cpumap_soft,
                                                v->cpu_soft_affinity);
         }
+        break;
     }
-    break;
 
     case XEN_DOMCTL_scheduler_op:
-    {
         ret = sched_adjust(d, &op->u.scheduler_op);
         copyback = 1;
-    }
-    break;
+        break;
 
     case XEN_DOMCTL_getdomaininfo:
-    { 
+    {
         domid_t dom = op->domain;
 
         rcu_read_lock(&domlist_read_lock);
@@ -852,12 +846,9 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
             if ( d->domain_id >= dom )
                 break;
 
+        ret = -ESRCH;
         if ( d == NULL )
-        {
-            rcu_read_unlock(&domlist_read_lock);
-            ret = -ESRCH;
-            break;
-        }
+            goto getdomaininfo_out;
 
         ret = xsm_getdomaininfo(XSM_HOOK, d);
         if ( ret )
@@ -871,11 +862,11 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
     getdomaininfo_out:
         rcu_read_unlock(&domlist_read_lock);
         d = NULL;
+        break;
     }
-    break;
 
     case XEN_DOMCTL_getvcpucontext:
-    { 
+    {
         vcpu_guest_context_u c = { .nat = NULL };
         struct vcpu         *v;
 
@@ -920,11 +911,11 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
 
     getvcpucontext_out:
         xfree(c.nat);
+        break;
     }
-    break;
 
     case XEN_DOMCTL_getvcpuinfo:
-    { 
+    {
         struct vcpu   *v;
         struct vcpu_runstate_info runstate;
 
@@ -945,15 +936,12 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
         op->u.getvcpuinfo.cpu      = v->processor;
         ret = 0;
         copyback = 1;
+        break;
     }
-    break;
 
     case XEN_DOMCTL_max_mem:
     {
-        unsigned long new_max;
-
-        ret = -EINVAL;
-        new_max = op->u.max_mem.max_memkb >> (PAGE_SHIFT-10);
+        unsigned long new_max = op->u.max_mem.max_memkb >> (PAGE_SHIFT - 10);
 
         spin_lock(&d->page_alloc_lock);
         /*
@@ -962,31 +950,25 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
          * the meantime, while tot > max, all new allocations are disallowed.
          */
         d->max_pages = new_max;
-        ret = 0;
         spin_unlock(&d->page_alloc_lock);
+        break;
     }
-    break;
 
     case XEN_DOMCTL_setdomainhandle:
-    {
         memcpy(d->handle, op->u.setdomainhandle.handle,
                sizeof(xen_domain_handle_t));
-        ret = 0;
-    }
-    break;
+        break;
 
     case XEN_DOMCTL_setdebugging:
-    {
-        ret = -EINVAL;
-        if ( d == current->domain ) /* no domain_pause() */
-            break;
-
-        domain_pause(d);
-        d->debugger_attached = !!op->u.setdebugging.enable;
-        domain_unpause(d); /* causes guest to latch new status */
-        ret = 0;
-    }
-    break;
+        if ( unlikely(d == current->domain) ) /* no domain_pause() */
+            ret = -EINVAL;
+        else
+        {
+            domain_pause(d);
+            d->debugger_attached = !!op->u.setdebugging.enable;
+            domain_unpause(d); /* causes guest to latch new status */
+        }
+        break;
 
     case XEN_DOMCTL_irq_permission:
     {
@@ -1005,8 +987,8 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
             ret = irq_permit_access(d, irq);
         else
             ret = irq_deny_access(d, irq);
+        break;
     }
-    break;
 
     case XEN_DOMCTL_iomem_permission:
     {
@@ -1028,8 +1010,8 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
             ret = iomem_deny_access(d, mfn, mfn + nr_mfns - 1);
         if ( !ret )
             memory_type_changed(d);
+        break;
     }
-    break;
 
     case XEN_DOMCTL_memory_mapping:
     {
@@ -1080,15 +1062,12 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
         }
         /* Do this unconditionally to cover errors on above failure paths. */
         memory_type_changed(d);
+        break;
     }
-    break;
 
     case XEN_DOMCTL_settimeoffset:
-    {
         domain_set_time_offset(d, op->u.settimeoffset.time_offset_seconds);
-        ret = 0;
-    }
-    break;
+        break;
 
     case XEN_DOMCTL_set_target:
     {
@@ -1114,16 +1093,12 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
 
         /* Hold reference on @e until we destroy @d. */
         d->target = e;
-
-        ret = 0;
+        break;
     }
-    break;
 
     case XEN_DOMCTL_subscribe:
-    {
         d->suspend_evtchn = op->u.subscribe.port;
-    }
-    break;
+        break;
 
     case XEN_DOMCTL_mem_event_op:
         ret = mem_event_domctl(d, &op->u.mem_event_op,
@@ -1132,41 +1107,28 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
         break;
 
     case XEN_DOMCTL_disable_migrate:
-    {
         d->disable_migrate = op->u.disable_migrate.disable;
-    }
-    break;
+        break;
 
 #ifdef HAS_MEM_ACCESS
     case XEN_DOMCTL_set_access_required:
-    {
-        struct p2m_domain* p2m;
-
-        ret = -EPERM;
-        if ( current->domain == d )
-            break;
-
-        ret = 0;
-        p2m = p2m_get_hostp2m(d);
-        p2m->access_required = op->u.access_required.access_required;
-    }
-    break;
+        if ( unlikely(current->domain == d) )
+            ret = -EPERM;
+        else
+            p2m_get_hostp2m(d)->access_required =
+                op->u.access_required.access_required;
+        break;
 #endif
 
     case XEN_DOMCTL_set_virq_handler:
-    {
-        uint32_t virq = op->u.set_virq_handler.virq;
-        ret = set_global_virq_handler(d, virq);
-    }
-    break;
+        ret = set_global_virq_handler(d, op->u.set_virq_handler.virq);
+        break;
 
     case XEN_DOMCTL_set_max_evtchn:
-    {
         d->max_evtchn_port = min_t(unsigned int,
                                    op->u.set_max_evtchn.max_port,
                                    INT_MAX);
-    }
-    break;
+        break;
 
     case XEN_DOMCTL_setvnumainfo:
     {
@@ -1185,9 +1147,8 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
         d->vnuma = vnuma;
         write_unlock(&d->vnuma_rwlock);
 
-        ret = 0;
+        break;
     }
-    break;
 
     default:
         ret = arch_do_domctl(op, d, u_domctl);
--
generated by git-patchbot for /home/xen/git/xen.git#master

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 

