
[Xen-changelog] [xen-unstable] [ACM] Coding style cleanups.



# HG changeset patch
# User kfraser@xxxxxxxxxxxxxxxxxxxxx
# Date 1185180960 -3600
# Node ID dae6a2790f6b615959fa8e1aaa640bbd78c8f4ad
# Parent  4a8dbbc16d48b5efbe7b4361a026c5959b35c5bf
[ACM] Coding style cleanups.
Signed-off-by: Stefan Berger <stefanb@xxxxxxxxxx>
---
 xen/acm/acm_chinesewall_hooks.c             |  180 +++++++-------
 xen/acm/acm_policy.c                        |  302 ++++++++++++++----------
 xen/acm/acm_simple_type_enforcement_hooks.c |  346 +++++++++++++++++-----------
 3 files changed, 489 insertions(+), 339 deletions(-)

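For readers unfamiliar with the Xen hypervisor coding style that this cleanup enforces, the short C sketch below illustrates the conventions visible throughout the diff: a space just inside the parentheses of if/for conditions, opening braces on their own line, and long conditions wrapped with operators kept at the end of the line. This example is not part of the changeset; the struct, field, and function names are invented for illustration and do not appear in the Xen tree.

    /* Hypothetical example only -- names do not come from the Xen sources. */
    #include <errno.h>
    #include <stddef.h>

    struct example_policy {
        unsigned int max_items;
        unsigned int *refcounts;
    };

    static int example_check(const struct example_policy *pol, unsigned int idx)
    {
        /* Space inside the condition parentheses, per Xen style. */
        if ( (pol == NULL) || (idx >= pol->max_items) )
            return -EINVAL;

        /* Same spacing rule for loop headers; brace on its own line. */
        for ( idx = 0; idx < pol->max_items; idx++ )
        {
            /* Long conditions are wrapped with the operator at line end. */
            if ( (pol->refcounts[idx] == 0) &&
                 (idx != 0) )
                return -EBUSY;
        }

        return 0;
    }
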
diff -r 4a8dbbc16d48 -r dae6a2790f6b xen/acm/acm_chinesewall_hooks.c
--- a/xen/acm/acm_chinesewall_hooks.c   Mon Jul 23 09:45:23 2007 +0100
+++ b/xen/acm/acm_chinesewall_hooks.c   Mon Jul 23 09:56:00 2007 +0100
@@ -73,10 +73,10 @@ int acm_init_chwall_policy(void)
         (domaintype_t *) xmalloc_array(domaintype_t,
                                        chwall_bin_pol.max_types);
 
-    if ((chwall_bin_pol.conflict_sets == NULL)
+    if ( (chwall_bin_pol.conflict_sets == NULL)
         || (chwall_bin_pol.running_types == NULL)
         || (chwall_bin_pol.ssidrefs == NULL)
-        || (chwall_bin_pol.conflict_aggregate_set == NULL))
+        || (chwall_bin_pol.conflict_aggregate_set == NULL) )
         return ACM_INIT_SSID_ERROR;
 
     /* initialize state */
@@ -97,14 +97,15 @@ static int chwall_init_domain_ssid(void 
 {
     struct chwall_ssid *chwall_ssidp = xmalloc(struct chwall_ssid);
     traceprintk("%s.\n", __func__);
-    if (chwall_ssidp == NULL)
+
+    if ( chwall_ssidp == NULL )
         return ACM_INIT_SSID_ERROR;
 
     chwall_ssidp->chwall_ssidref =
         GET_SSIDREF(ACM_CHINESE_WALL_POLICY, ssidref);
 
-    if ((chwall_ssidp->chwall_ssidref >= chwall_bin_pol.max_ssidrefs)
-        || (chwall_ssidp->chwall_ssidref == ACM_DEFAULT_LOCAL_SSID))
+    if ( (chwall_ssidp->chwall_ssidref >= chwall_bin_pol.max_ssidrefs)
+        || (chwall_ssidp->chwall_ssidref == ACM_DEFAULT_LOCAL_SSID) )
     {
         printkd("%s: ERROR chwall_ssidref(%x) undefined (>max) or unset 
(0).\n",
                 __func__, chwall_ssidp->chwall_ssidref);
@@ -119,7 +120,6 @@ static int chwall_init_domain_ssid(void 
 
 static void chwall_free_domain_ssid(void *chwall_ssid)
 {
-    traceprintk("%s.\n", __func__);
     xfree(chwall_ssid);
     return;
 }
@@ -132,7 +132,7 @@ static int chwall_dump_policy(u8 * buf, 
         (struct acm_chwall_policy_buffer *) buf;
     int ret = 0;
 
-    if (buf_size < sizeof(struct acm_chwall_policy_buffer))
+    if ( buf_size < sizeof(struct acm_chwall_policy_buffer) )
         return -EINVAL;
 
     chwall_buf->chwall_max_types = cpu_to_be32(chwall_bin_pol.max_types);
@@ -159,7 +159,7 @@ static int chwall_dump_policy(u8 * buf, 
 
     ret = (ret + 7) & ~7;
 
-    if (buf_size < ret)
+    if ( buf_size < ret )
         return -EINVAL;
 
     /* now copy buffers over */
@@ -214,12 +214,12 @@ chwall_init_state(struct acm_chwall_poli
         traceprintk("%s: validating policy for domain %x (chwall-REF=%x).\n",
                     __func__, d->domain_id, chwall_ssidref);
         /* a) adjust types ref-count for running domains */
-        for (i = 0; i < chwall_buf->chwall_max_types; i++)
+        for ( i = 0; i < chwall_buf->chwall_max_types; i++ )
             running_types[i] +=
                 ssidrefs[chwall_ssidref * chwall_buf->chwall_max_types + i];
 
         /* b) check for conflict */
-        for (i = 0; i < chwall_buf->chwall_max_types; i++)
+        for ( i = 0; i < chwall_buf->chwall_max_types; i++ )
             if (conflict_aggregate_set[i] &&
                 ssidrefs[chwall_ssidref * chwall_buf->chwall_max_types + i])
             {
@@ -233,11 +233,11 @@ chwall_init_state(struct acm_chwall_poli
             }
         /* set violation and break out of the loop */
         /* c) adapt conflict aggregate set for this domain (notice conflicts) */
-        for (i = 0; i < chwall_buf->chwall_max_conflictsets; i++)
+        for ( i = 0; i < chwall_buf->chwall_max_conflictsets; i++ )
         {
             int common = 0;
             /* check if conflict_set_i and ssidref have common types */
-            for (j = 0; j < chwall_buf->chwall_max_types; j++)
+            for ( j = 0; j < chwall_buf->chwall_max_types; j++ )
                 if (conflict_sets[i * chwall_buf->chwall_max_types + j] &&
                     ssidrefs[chwall_ssidref *
                             chwall_buf->chwall_max_types + j])
@@ -248,7 +248,7 @@ chwall_init_state(struct acm_chwall_poli
             if (common == 0)
                 continue;       /* try next conflict set */
             /* now add types of the conflict set to conflict_aggregate_set (except types in chwall_ssidref) */
-            for (j = 0; j < chwall_buf->chwall_max_types; j++)
+            for ( j = 0; j < chwall_buf->chwall_max_types; j++ )
                 if (conflict_sets[i * chwall_buf->chwall_max_types + j] &&
                     !ssidrefs[chwall_ssidref *
                              chwall_buf->chwall_max_types + j])
@@ -268,7 +268,8 @@ int
 int
 do_chwall_init_state_curr(struct acm_sized_buffer *errors)
 {
-    struct acm_chwall_policy_buffer chwall_buf = {
+    struct acm_chwall_policy_buffer chwall_buf =
+    {
          /* only these two are important */
          .chwall_max_types        = chwall_bin_pol.max_types,
          .chwall_max_conflictsets = chwall_bin_pol.max_conflictsets,
@@ -300,8 +301,8 @@ static int _chwall_update_policy(u8 *buf
     /* policy write-locked already */
     struct acm_chwall_policy_buffer *chwall_buf =
         (struct acm_chwall_policy_buffer *) buf;
-    void *ssids = NULL, *conflict_sets = NULL, *running_types =
-        NULL, *conflict_aggregate_set = NULL;
+    void *ssids = NULL, *conflict_sets = NULL, *running_types = NULL,
+         *conflict_aggregate_set = NULL;
 
     /* 1. allocate new buffers */
     ssids =
@@ -317,23 +318,23 @@ static int _chwall_update_policy(u8 *buf
     conflict_aggregate_set =
         xmalloc_array(domaintype_t, chwall_buf->chwall_max_types);
 
-    if ((ssids == NULL) || (conflict_sets == NULL)
-        || (running_types == NULL) || (conflict_aggregate_set == NULL))
+    if ( (ssids == NULL) || (conflict_sets == NULL) ||
+         (running_types == NULL) || (conflict_aggregate_set == NULL) )
         goto error_free;
 
     /* 2. set new policy */
-    if (chwall_buf->chwall_ssid_offset + sizeof(domaintype_t) *
-        chwall_buf->chwall_max_types * chwall_buf->chwall_max_ssidrefs >
-        buf_size)
+    if ( chwall_buf->chwall_ssid_offset + sizeof(domaintype_t) *
+         chwall_buf->chwall_max_types * chwall_buf->chwall_max_ssidrefs >
+         buf_size )
         goto error_free;
 
     arrcpy(ssids, buf + chwall_buf->chwall_ssid_offset,
            sizeof(domaintype_t),
            chwall_buf->chwall_max_types * chwall_buf->chwall_max_ssidrefs);
 
-    if (chwall_buf->chwall_conflict_sets_offset + sizeof(domaintype_t) *
-        chwall_buf->chwall_max_types *
-        chwall_buf->chwall_max_conflictsets > buf_size)
+    if ( chwall_buf->chwall_conflict_sets_offset + sizeof(domaintype_t) *
+         chwall_buf->chwall_max_types *
+         chwall_buf->chwall_max_conflictsets > buf_size )
         goto error_free;
 
     arrcpy(conflict_sets, buf + chwall_buf->chwall_conflict_sets_offset,
@@ -349,10 +350,10 @@ static int _chwall_update_policy(u8 *buf
 
     /* 3. now re-calculate the state for the new policy based on running domains;
      *    this can fail if new policy is conflicting with running domains */
-    if (chwall_init_state(chwall_buf, ssids,
-                          conflict_sets, running_types,
-                          conflict_aggregate_set,
-                          errors))
+    if ( chwall_init_state(chwall_buf, ssids,
+                           conflict_sets, running_types,
+                           conflict_aggregate_set,
+                           errors))
     {
         printk("%s: New policy conflicts with running domains. Policy load 
aborted.\n",
                __func__);
@@ -360,7 +361,8 @@ static int _chwall_update_policy(u8 *buf
     }
 
     /* if this was only a test run, exit with ACM_OK */
-    if (test_only) {
+    if ( test_only )
+    {
         rc = ACM_OK;
         goto error_free;
     }
@@ -377,10 +379,13 @@ static int _chwall_update_policy(u8 *buf
     chwall_bin_pol.conflict_aggregate_set = conflict_aggregate_set;
     chwall_bin_pol.running_types = running_types;
     chwall_bin_pol.conflict_sets = conflict_sets;
+
     return ACM_OK;
 
  error_free:
-    if (!test_only) printk("%s: ERROR setting policy.\n", __func__);
+    if ( !test_only )
+        printk("%s: ERROR setting policy.\n", __func__);
+
     xfree(ssids);
     xfree(conflict_sets);
     xfree(running_types);
@@ -397,7 +402,7 @@ static int chwall_test_policy(u8 *buf, u
     struct acm_chwall_policy_buffer *chwall_buf =
         (struct acm_chwall_policy_buffer *) buf;
 
-    if (buf_size < sizeof(struct acm_chwall_policy_buffer))
+    if ( buf_size < sizeof(struct acm_chwall_policy_buffer) )
         return -EINVAL;
 
     /* rewrite the policy due to endianess */
@@ -419,15 +424,14 @@ static int chwall_test_policy(u8 *buf, u
         be32_to_cpu(chwall_buf->chwall_conflict_aggregate_offset);
 
     /* policy type and version checks */
-    if ((chwall_buf->policy_code != ACM_CHINESE_WALL_POLICY) ||
-        (chwall_buf->policy_version != ACM_CHWALL_VERSION))
+    if ( (chwall_buf->policy_code != ACM_CHINESE_WALL_POLICY) ||
+         (chwall_buf->policy_version != ACM_CHWALL_VERSION) )
         return -EINVAL;
 
     /* during boot dom0_chwall_ssidref is set */
-    if (is_bootpolicy &&
-        (dom0_chwall_ssidref >= chwall_buf->chwall_max_ssidrefs))
+    if ( is_bootpolicy &&
+         (dom0_chwall_ssidref >= chwall_buf->chwall_max_ssidrefs) )
         return -EINVAL;
-
 
     return _chwall_update_policy(buf, buf_size, 1, errors);
 }
@@ -448,17 +452,17 @@ static int chwall_dump_ssid_types(ssidre
     int i;
 
     /* fill in buffer */
-    if (chwall_bin_pol.max_types > len)
+    if ( chwall_bin_pol.max_types > len )
         return -EFAULT;
 
-    if (ssidref >= chwall_bin_pol.max_ssidrefs)
+    if ( ssidref >= chwall_bin_pol.max_ssidrefs )
         return -EFAULT;
 
     /* read types for chwall ssidref */
-    for (i = 0; i < chwall_bin_pol.max_types; i++)
-    {
-        if (chwall_bin_pol.
-            ssidrefs[ssidref * chwall_bin_pol.max_types + i])
+    for ( i = 0; i < chwall_bin_pol.max_types; i++ )
+    {
+        if ( chwall_bin_pol.
+             ssidrefs[ssidref * chwall_bin_pol.max_types + i] )
             buf[i] = 1;
         else
             buf[i] = 0;
@@ -476,15 +480,16 @@ static int _chwall_pre_domain_create(voi
 {
     ssidref_t chwall_ssidref;
     int i, j;
-    traceprintk("%s.\n", __func__);
 
     chwall_ssidref = GET_SSIDREF(ACM_CHINESE_WALL_POLICY, ssidref);
+
     if (chwall_ssidref == ACM_DEFAULT_LOCAL_SSID)
     {
         printk("%s: ERROR CHWALL SSID is NOT SET but policy enforced.\n",
                __func__);
         return ACM_ACCESS_DENIED;       /* catching and indicating config error */
     }
+
     if (chwall_ssidref >= chwall_bin_pol.max_ssidrefs)
     {
         printk("%s: ERROR chwall_ssidref > max(%x).\n",
@@ -503,15 +508,15 @@ static int _chwall_pre_domain_create(voi
 
     /* B: chinese wall conflict set adjustment (so that other
      *      other domains simultaneously created are evaluated against this new set)*/
-    for (i = 0; i < chwall_bin_pol.max_conflictsets; i++)
+    for ( i = 0; i < chwall_bin_pol.max_conflictsets; i++ )
     {
         int common = 0;
         /* check if conflict_set_i and ssidref have common types */
-        for (j = 0; j < chwall_bin_pol.max_types; j++)
-            if (chwall_bin_pol.
-                conflict_sets[i * chwall_bin_pol.max_types + j]
-                && chwall_bin_pol.ssidrefs[chwall_ssidref *
-                                          chwall_bin_pol.max_types + j])
+        for ( j = 0; j < chwall_bin_pol.max_types; j++ )
+            if ( chwall_bin_pol.
+                 conflict_sets[i * chwall_bin_pol.max_types + j]
+                 && chwall_bin_pol.ssidrefs[chwall_ssidref *
+                                            chwall_bin_pol.max_types + j] )
             {
                 common = 1;
                 break;
@@ -519,12 +524,12 @@ static int _chwall_pre_domain_create(voi
         if (common == 0)
             continue;           /* try next conflict set */
         /* now add types of the conflict set to conflict_aggregate_set (except types in chwall_ssidref) */
-        for (j = 0; j < chwall_bin_pol.max_types; j++)
-            if (chwall_bin_pol.
-                conflict_sets[i * chwall_bin_pol.max_types + j]
-                && !chwall_bin_pol.ssidrefs[chwall_ssidref *
-                                           chwall_bin_pol.max_types + j])
-                chwall_bin_pol.conflict_aggregate_set[j]++;
+        for ( j = 0; j < chwall_bin_pol.max_types; j++ )
+            if ( chwall_bin_pol.
+                 conflict_sets[i * chwall_bin_pol.max_types + j]
+                 && !chwall_bin_pol.ssidrefs[chwall_ssidref *
+                                             chwall_bin_pol.max_types + j])
+                 chwall_bin_pol.conflict_aggregate_set[j]++;
     }
     return ACM_ACCESS_PERMITTED;
 }
@@ -534,18 +539,16 @@ static void _chwall_post_domain_create(d
 {
     int i, j;
     ssidref_t chwall_ssidref;
-    traceprintk("%s.\n", __func__);
 
     chwall_ssidref = GET_SSIDREF(ACM_CHINESE_WALL_POLICY, ssidref);
     /* adjust types ref-count for running domains */
-    for (i = 0; i < chwall_bin_pol.max_types; i++)
+    for ( i = 0; i < chwall_bin_pol.max_types; i++ )
         chwall_bin_pol.running_types[i] +=
             chwall_bin_pol.ssidrefs[chwall_ssidref *
                                    chwall_bin_pol.max_types + i];
-    if (domid)
-    {
+    if ( domid )
         return;
-    }
+
     /* Xen does not call pre-create hook for DOM0;
      * to consider type conflicts of any domain with DOM0, we need
      * to adjust the conflict_aggregate for DOM0 here the same way it
@@ -555,27 +558,27 @@ static void _chwall_post_domain_create(d
 
     /* chinese wall conflict set adjustment (so that other
      *      other domains simultaneously created are evaluated against this new set)*/
-    for (i = 0; i < chwall_bin_pol.max_conflictsets; i++)
+    for ( i = 0; i < chwall_bin_pol.max_conflictsets; i++ )
     {
         int common = 0;
         /* check if conflict_set_i and ssidref have common types */
-        for (j = 0; j < chwall_bin_pol.max_types; j++)
-            if (chwall_bin_pol.
-                conflict_sets[i * chwall_bin_pol.max_types + j]
-                && chwall_bin_pol.ssidrefs[chwall_ssidref *
-                                          chwall_bin_pol.max_types + j])
+        for ( j = 0; j < chwall_bin_pol.max_types; j++ )
+            if ( chwall_bin_pol.
+                 conflict_sets[i * chwall_bin_pol.max_types + j]
+                 && chwall_bin_pol.ssidrefs[chwall_ssidref *
+                                            chwall_bin_pol.max_types + j] )
             {
                 common = 1;
                 break;
             }
-        if (common == 0)
+        if ( common == 0 )
             continue;           /* try next conflict set */
         /* now add types of the conflict set to conflict_aggregate_set (except types in chwall_ssidref) */
-        for (j = 0; j < chwall_bin_pol.max_types; j++)
-            if (chwall_bin_pol.
-                conflict_sets[i * chwall_bin_pol.max_types + j]
-                && !chwall_bin_pol.ssidrefs[chwall_ssidref *
-                                           chwall_bin_pol.max_types + j])
+        for ( j = 0; j < chwall_bin_pol.max_types; j++ )
+            if ( chwall_bin_pol.
+                 conflict_sets[i * chwall_bin_pol.max_types + j]
+                 && !chwall_bin_pol.ssidrefs[chwall_ssidref *
+                                             chwall_bin_pol.max_types + j] )
                 chwall_bin_pol.conflict_aggregate_set[j]++;
     }
     return;
@@ -593,10 +596,11 @@ static int chwall_domain_create(void *su
 {
     int rc;
     read_lock(&acm_bin_pol_rwlock);
+
     rc = _chwall_pre_domain_create(subject_ssid, ssidref);
-    if (rc == ACM_ACCESS_PERMITTED) {
+    if ( rc == ACM_ACCESS_PERMITTED )
         _chwall_post_domain_create(domid, ssidref);
-    }
+
     read_unlock(&acm_bin_pol_rwlock);
     return rc;
 }
@@ -613,25 +617,23 @@ static void chwall_domain_destroy(void *
                                                  object_ssid);
     ssidref_t chwall_ssidref = chwall_ssidp->chwall_ssidref;
 
-    traceprintk("%s.\n", __func__);
-
     read_lock(&acm_bin_pol_rwlock);
+
     /* adjust running types set */
-    for (i = 0; i < chwall_bin_pol.max_types; i++)
+    for ( i = 0; i < chwall_bin_pol.max_types; i++ )
         chwall_bin_pol.running_types[i] -=
             chwall_bin_pol.ssidrefs[chwall_ssidref *
                                    chwall_bin_pol.max_types + i];
 
     /* roll-back: re-adjust conflicting types aggregate */
-    for (i = 0; i < chwall_bin_pol.max_conflictsets; i++)
+    for ( i = 0; i < chwall_bin_pol.max_conflictsets; i++ )
     {
         int common = 0;
         /* check if conflict_set_i and ssidref have common types */
-        for (j = 0; j < chwall_bin_pol.max_types; j++)
-            if (chwall_bin_pol.
-                conflict_sets[i * chwall_bin_pol.max_types + j]
-                && chwall_bin_pol.ssidrefs[chwall_ssidref *
-                                          chwall_bin_pol.max_types + j])
+        for ( j = 0; j < chwall_bin_pol.max_types; j++ )
+            if ( chwall_bin_pol.conflict_sets[i * chwall_bin_pol.max_types + j]
+                 && chwall_bin_pol.ssidrefs[chwall_ssidref *
+                                            chwall_bin_pol.max_types + j])
             {
                 common = 1;
                 break;
@@ -639,14 +641,16 @@ static void chwall_domain_destroy(void *
         if (common == 0)
             continue;           /* try next conflict set, this one does not include any type of chwall_ssidref */
         /* now add types of the conflict set to conflict_aggregate_set (except types in chwall_ssidref) */
-        for (j = 0; j < chwall_bin_pol.max_types; j++)
-            if (chwall_bin_pol.
-                conflict_sets[i * chwall_bin_pol.max_types + j]
-                && !chwall_bin_pol.ssidrefs[chwall_ssidref *
-                                           chwall_bin_pol.max_types + j])
+        for ( j = 0; j < chwall_bin_pol.max_types; j++ )
+            if ( chwall_bin_pol.
+                 conflict_sets[i * chwall_bin_pol.max_types + j]
+                 && !chwall_bin_pol.ssidrefs[chwall_ssidref *
+                                             chwall_bin_pol.max_types + j])
                 chwall_bin_pol.conflict_aggregate_set[j]--;
     }
+
     read_unlock(&acm_bin_pol_rwlock);
+
     return;
 }
 
diff -r 4a8dbbc16d48 -r dae6a2790f6b xen/acm/acm_policy.c
--- a/xen/acm/acm_policy.c      Mon Jul 23 09:45:23 2007 +0100
+++ b/xen/acm/acm_policy.c      Mon Jul 23 09:56:00 2007 +0100
@@ -37,9 +37,9 @@ static int acm_check_deleted_ssidrefs(st
 static int acm_check_deleted_ssidrefs(struct acm_sized_buffer *dels,
                                       struct acm_sized_buffer *errors);
 static void acm_doms_change_ssidref(ssidref_t (*translator)
-                                     (const struct acm_ssid_domain *,
-                                      const struct acm_sized_buffer *),
-                                      struct acm_sized_buffer *translation_map);
+                                   (const struct acm_ssid_domain *,
+                                    const struct acm_sized_buffer *),
+                                    struct acm_sized_buffer *translation_map);
 static void acm_doms_restore_ssidref(void);
 static ssidref_t oldssid_to_newssid(const struct acm_ssid_domain *,
                                     const struct acm_sized_buffer *map);
@@ -50,15 +50,15 @@ acm_set_policy(XEN_GUEST_HANDLE_64(void)
 {
     u8 *policy_buffer = NULL;
     int ret = -EFAULT;
- 
-    if (buf_size < sizeof(struct acm_policy_buffer))
+
+    if ( buf_size < sizeof(struct acm_policy_buffer) )
         return -EFAULT;
 
     /* copy buffer from guest domain */
-    if ((policy_buffer = xmalloc_array(u8, buf_size)) == NULL)
+    if ( (policy_buffer = xmalloc_array(u8, buf_size)) == NULL )
         return -ENOMEM;
 
-    if (copy_from_guest(policy_buffer, buf, buf_size))
+    if ( copy_from_guest(policy_buffer, buf, buf_size) )
     {
         printk("%s: Error copying!\n",__func__);
         goto error_free;
@@ -93,9 +93,8 @@ _acm_update_policy(void *buf, u32 buf_si
 
     if (  require_update != 0 &&
         ( deletions == NULL || ssidchanges == NULL ) )
-    {
-        goto error_lock_free;
-    }
+        goto error_lock_free;
+
     require_update = 1;
     /*
        first some tests to check compatibility of new policy with
@@ -103,15 +102,13 @@ _acm_update_policy(void *buf, u32 buf_si
      */
 
     /* if ssidrefs are to be deleted, make sure no domain is using them */
-    if (deletions != NULL) {
-        if (acm_check_deleted_ssidrefs(deletions, errors))
+    if ( deletions != NULL )
+        if ( acm_check_deleted_ssidrefs(deletions, errors) )
             goto error_lock_free;
-    }
-
-    if ((ssidchanges != NULL) && (ssidchanges->num_items > 0)) {
+
+    if ( (ssidchanges != NULL) && (ssidchanges->num_items > 0) )
         /* assign all running domains new ssidrefs as requested */
         acm_doms_change_ssidref(oldssid_to_newssid, ssidchanges);
-    }
 
     /* test primary policy data with the new ssidrefs */
     offset = be32_to_cpu(pol->primary_buffer_offset);
@@ -129,9 +126,8 @@ _acm_update_policy(void *buf, u32 buf_si
     if ( (offset + length) > buf_size ||
          acm_secondary_ops->test_binary_policy(buf + offset, length,
                                                is_bootpolicy,
-                                               errors)) {
-        goto error_lock_free;
-    }
+                                               errors))
+        goto error_lock_free;
 
     /* end of testing --- now real updates */
 
@@ -140,7 +136,7 @@ _acm_update_policy(void *buf, u32 buf_si
 
     /* set label reference name */
     if ( (offset + length) > buf_size ||
-        acm_set_policy_reference(buf + offset, length) )
+         acm_set_policy_reference(buf + offset, length) )
         goto error_lock_free;
 
     /* set primary policy data */
@@ -161,16 +157,16 @@ _acm_update_policy(void *buf, u32 buf_si
            sizeof(acm_bin_pol.xml_pol_version));
 
     if ( acm_primary_ops->is_default_policy() &&
-         acm_secondary_ops->is_default_policy() ) {
+         acm_secondary_ops->is_default_policy() )
         require_update = 0;
-    }
 
     write_unlock(&acm_bin_pol_rwlock);
 
     return ACM_OK;
 
 error_lock_free:
-    if ((ssidchanges != NULL) && (ssidchanges->num_items > 0)) {
+    if ( (ssidchanges != NULL) && (ssidchanges->num_items > 0) )
+    {
         acm_doms_restore_ssidref();
     }
     do_chwall_init_state_curr(NULL);
@@ -189,18 +185,21 @@ do_acm_set_policy(void *buf, u32 buf_siz
     struct acm_policy_buffer *pol = (struct acm_policy_buffer *)buf;
 
     /* some sanity checking */
-    if ((be32_to_cpu(pol->magic) != ACM_MAGIC) ||
-        (buf_size != be32_to_cpu(pol->len)) ||
-        (be32_to_cpu(pol->policy_version) != ACM_POLICY_VERSION))
+    if ( (be32_to_cpu(pol->magic) != ACM_MAGIC) ||
+         (buf_size != be32_to_cpu(pol->len)) ||
+         (be32_to_cpu(pol->policy_version) != ACM_POLICY_VERSION) )
     {
         printk("%s: ERROR in Magic, Version, or buf size.\n", __func__);
         goto error_free;
     }
 
-    if (acm_active_security_policy == ACM_POLICY_UNDEFINED) {
+    if ( acm_active_security_policy == ACM_POLICY_UNDEFINED )
+    {
         /* setup the policy with the boot policy */
-        if (acm_init_binary_policy((be32_to_cpu(pol->secondary_policy_code) << 4) |
-                                   be32_to_cpu(pol->primary_policy_code))) {
+        if ( acm_init_binary_policy(
+                             (be32_to_cpu(pol->secondary_policy_code) << 4) |
+                              be32_to_cpu(pol->primary_policy_code)) )
+        {
             goto error_free;
         }
         acm_active_security_policy = (acm_bin_pol.secondary_policy_code << 4) |
@@ -208,8 +207,10 @@ do_acm_set_policy(void *buf, u32 buf_siz
     }
 
     /* once acm_active_security_policy is set, it cannot be changed */
-    if ((be32_to_cpu(pol->primary_policy_code) != acm_bin_pol.primary_policy_code) ||
-        (be32_to_cpu(pol->secondary_policy_code) != acm_bin_pol.secondary_policy_code))
+    if ( (be32_to_cpu(pol->primary_policy_code) !=
+                                        acm_bin_pol.primary_policy_code) ||
+         (be32_to_cpu(pol->secondary_policy_code) !=
+                                        acm_bin_pol.secondary_policy_code) )
     {
         printkd("%s: Wrong policy type in boot policy!\n", __func__);
         goto error_free;
@@ -232,18 +233,20 @@ acm_get_policy(XEN_GUEST_HANDLE_64(void)
     int ret;
     struct acm_policy_buffer *bin_pol;
 
-    if (buf_size < sizeof(struct acm_policy_buffer))
+    if ( buf_size < sizeof(struct acm_policy_buffer) )
         return -EFAULT;
 
-    if ((policy_buffer = xmalloc_array(u8, buf_size)) == NULL)
+    if ( (policy_buffer = xmalloc_array(u8, buf_size)) == NULL )
         return -ENOMEM;
 
     read_lock(&acm_bin_pol_rwlock);
 
     bin_pol = (struct acm_policy_buffer *)policy_buffer;
     bin_pol->magic = cpu_to_be32(ACM_MAGIC);
-    bin_pol->primary_policy_code = cpu_to_be32(acm_bin_pol.primary_policy_code);
-    bin_pol->secondary_policy_code = cpu_to_be32(acm_bin_pol.secondary_policy_code);
+    bin_pol->primary_policy_code =
+                                cpu_to_be32(acm_bin_pol.primary_policy_code);
+    bin_pol->secondary_policy_code =
+                                cpu_to_be32(acm_bin_pol.secondary_policy_code);
 
     bin_pol->len = cpu_to_be32(sizeof(struct acm_policy_buffer));
     bin_pol->policy_reference_offset = cpu_to_be32(be32_to_cpu(bin_pol->len));
@@ -254,39 +257,47 @@ acm_get_policy(XEN_GUEST_HANDLE_64(void)
            &acm_bin_pol.xml_pol_version,
            sizeof(struct acm_policy_version));
 
-    ret = acm_dump_policy_reference(policy_buffer + be32_to_cpu(bin_pol->policy_reference_offset),
-                                    buf_size - be32_to_cpu(bin_pol->policy_reference_offset));
-    if (ret < 0)
+    ret = acm_dump_policy_reference(
+               policy_buffer + be32_to_cpu(bin_pol->policy_reference_offset),
+               buf_size - be32_to_cpu(bin_pol->policy_reference_offset));
+
+    if ( ret < 0 )
         goto error_free_unlock;
 
     bin_pol->len = cpu_to_be32(be32_to_cpu(bin_pol->len) + ret);
     bin_pol->primary_buffer_offset = cpu_to_be32(be32_to_cpu(bin_pol->len));
 
-    ret = acm_primary_ops->dump_binary_policy (policy_buffer + be32_to_cpu(bin_pol->primary_buffer_offset),
-                                               buf_size - be32_to_cpu(bin_pol->primary_buffer_offset));
-    if (ret < 0)
+    ret = acm_primary_ops->dump_binary_policy(
+                 policy_buffer + be32_to_cpu(bin_pol->primary_buffer_offset),
+                 buf_size - be32_to_cpu(bin_pol->primary_buffer_offset));
+
+    if ( ret < 0 )
         goto error_free_unlock;
 
     bin_pol->len = cpu_to_be32(be32_to_cpu(bin_pol->len) + ret);
     bin_pol->secondary_buffer_offset = cpu_to_be32(be32_to_cpu(bin_pol->len));
 
-    ret = acm_secondary_ops->dump_binary_policy(policy_buffer + be32_to_cpu(bin_pol->secondary_buffer_offset),
-                                                buf_size - be32_to_cpu(bin_pol->secondary_buffer_offset));
-    if (ret < 0)
+    ret = acm_secondary_ops->dump_binary_policy(
+               policy_buffer + be32_to_cpu(bin_pol->secondary_buffer_offset),
+               buf_size - be32_to_cpu(bin_pol->secondary_buffer_offset));
+
+    if ( ret < 0 )
         goto error_free_unlock;
 
     bin_pol->len = cpu_to_be32(be32_to_cpu(bin_pol->len) + ret);
-    if (copy_to_guest(buf, policy_buffer, be32_to_cpu(bin_pol->len)))
+    if ( copy_to_guest(buf, policy_buffer, be32_to_cpu(bin_pol->len)) )
         goto error_free_unlock;
 
     read_unlock(&acm_bin_pol_rwlock);
     xfree(policy_buffer);
+
     return ACM_OK;
 
  error_free_unlock:
     read_unlock(&acm_bin_pol_rwlock);
     printk("%s: Error getting policy.\n", __func__);
     xfree(policy_buffer);
+
     return -EFAULT;
 }
 
@@ -298,40 +309,50 @@ acm_dump_statistics(XEN_GUEST_HANDLE_64(
     int len1, len2;
     struct acm_stats_buffer acm_stats;
 
-    if ((stats_buffer = xmalloc_array(u8, buf_size)) == NULL)
+    if ( (stats_buffer = xmalloc_array(u8, buf_size)) == NULL )
         return -ENOMEM;
 
     read_lock(&acm_bin_pol_rwlock);
      
-    len1 = acm_primary_ops->dump_statistics(stats_buffer + sizeof(struct acm_stats_buffer),
-                                            buf_size - sizeof(struct acm_stats_buffer));
-    if (len1 < 0)
+    len1 = acm_primary_ops->dump_statistics(
+                             stats_buffer + sizeof(struct acm_stats_buffer),
+                             buf_size - sizeof(struct acm_stats_buffer));
+    if ( len1 < 0 )
         goto error_lock_free;
       
-    len2 = acm_secondary_ops->dump_statistics(stats_buffer + sizeof(struct acm_stats_buffer) + len1,
-                                              buf_size - sizeof(struct acm_stats_buffer) - len1);
-    if (len2 < 0)
+    len2 = acm_secondary_ops->dump_statistics(
+                      stats_buffer + sizeof(struct acm_stats_buffer) + len1,
+                      buf_size - sizeof(struct acm_stats_buffer) - len1);
+    if ( len2 < 0 )
         goto error_lock_free;
 
     acm_stats.magic = cpu_to_be32(ACM_MAGIC);
-    acm_stats.primary_policy_code = cpu_to_be32(acm_bin_pol.primary_policy_code);
-    acm_stats.secondary_policy_code = cpu_to_be32(acm_bin_pol.secondary_policy_code);
-    acm_stats.primary_stats_offset = cpu_to_be32(sizeof(struct acm_stats_buffer));
-    acm_stats.secondary_stats_offset = cpu_to_be32(sizeof(struct acm_stats_buffer) + len1);
+    acm_stats.primary_policy_code =
+                           cpu_to_be32(acm_bin_pol.primary_policy_code);
+    acm_stats.secondary_policy_code =
+                           cpu_to_be32(acm_bin_pol.secondary_policy_code);
+    acm_stats.primary_stats_offset =
+                           cpu_to_be32(sizeof(struct acm_stats_buffer));
+    acm_stats.secondary_stats_offset =
+                           cpu_to_be32(sizeof(struct acm_stats_buffer) + len1);
     acm_stats.len = cpu_to_be32(sizeof(struct acm_stats_buffer) + len1 + len2);
 
     memcpy(stats_buffer, &acm_stats, sizeof(struct acm_stats_buffer));
 
-    if (copy_to_guest(buf, stats_buffer, sizeof(struct acm_stats_buffer) + len1 + len2))
+    if ( copy_to_guest(buf,
+                       stats_buffer,
+                       sizeof(struct acm_stats_buffer) + len1 + len2) )
         goto error_lock_free;
 
     read_unlock(&acm_bin_pol_rwlock);
     xfree(stats_buffer);
+
     return ACM_OK;
 
  error_lock_free:
     read_unlock(&acm_bin_pol_rwlock);
     xfree(stats_buffer);
+
     return -EFAULT;
 }
 
@@ -343,10 +364,10 @@ acm_get_ssid(ssidref_t ssidref, XEN_GUES
     u8 *ssid_buffer;
     int ret;
     struct acm_ssid_buffer *acm_ssid;
-    if (buf_size < sizeof(struct acm_ssid_buffer))
+    if ( buf_size < sizeof(struct acm_ssid_buffer) )
         return -EFAULT;
 
-    if ((ssid_buffer = xmalloc_array(u8, buf_size)) == NULL)
+    if ( (ssid_buffer = xmalloc_array(u8, buf_size)) == NULL )
         return -ENOMEM;
 
     read_lock(&acm_bin_pol_rwlock);
@@ -358,45 +379,50 @@ acm_get_ssid(ssidref_t ssidref, XEN_GUES
     acm_ssid->secondary_policy_code = acm_bin_pol.secondary_policy_code;
 
     acm_ssid->policy_reference_offset = acm_ssid->len;
-    ret = acm_dump_policy_reference(ssid_buffer + acm_ssid->policy_reference_offset,
-                                    buf_size - acm_ssid->policy_reference_offset);
-    if (ret < 0)
+    ret = acm_dump_policy_reference(
+                          ssid_buffer + acm_ssid->policy_reference_offset,
+                          buf_size - acm_ssid->policy_reference_offset);
+    if ( ret < 0 )
         goto error_free_unlock;
 
     acm_ssid->len += ret;
     acm_ssid->primary_types_offset = acm_ssid->len;
 
     /* ret >= 0 --> ret == max_types */
-    ret = acm_primary_ops->dump_ssid_types(ACM_PRIMARY(ssidref),
-                                           ssid_buffer + acm_ssid->primary_types_offset,
-                                           buf_size - acm_ssid->primary_types_offset);
-    if (ret < 0)
+    ret = acm_primary_ops->dump_ssid_types(
+                                 ACM_PRIMARY(ssidref),
+                                 ssid_buffer + acm_ssid->primary_types_offset,
+                                 buf_size - acm_ssid->primary_types_offset);
+    if ( ret < 0 )
         goto error_free_unlock;
 
     acm_ssid->len += ret;
     acm_ssid->primary_max_types = ret;
     acm_ssid->secondary_types_offset = acm_ssid->len;
 
-    ret = acm_secondary_ops->dump_ssid_types(ACM_SECONDARY(ssidref),
-                                             ssid_buffer + acm_ssid->secondary_types_offset,
-                                             buf_size - acm_ssid->secondary_types_offset);
-    if (ret < 0)
+    ret = acm_secondary_ops->dump_ssid_types(
+                             ACM_SECONDARY(ssidref),
+                             ssid_buffer + acm_ssid->secondary_types_offset,
+                             buf_size - acm_ssid->secondary_types_offset);
+    if ( ret < 0 )
         goto error_free_unlock;
 
     acm_ssid->len += ret;
     acm_ssid->secondary_max_types = ret;
 
-    if (copy_to_guest(buf, ssid_buffer, acm_ssid->len))
+    if ( copy_to_guest(buf, ssid_buffer, acm_ssid->len) )
         goto error_free_unlock;
 
     read_unlock(&acm_bin_pol_rwlock);
     xfree(ssid_buffer);
+
     return ACM_OK;
 
  error_free_unlock:
     read_unlock(&acm_bin_pol_rwlock);
     printk("%s: Error getting ssid.\n", __func__);
     xfree(ssid_buffer);
+
     return -ENOMEM;
 }
 
@@ -404,7 +430,8 @@ acm_get_decision(ssidref_t ssidref1, ssi
 acm_get_decision(ssidref_t ssidref1, ssidref_t ssidref2, u32 hook)
 {
     int ret = ACM_ACCESS_DENIED;
-    switch (hook) {
+    switch ( hook )
+    {
 
     case ACMHOOK_sharing:
         /* Sharing hook restricts access in STE policy only */
@@ -438,18 +465,21 @@ acm_check_used_ssidref(uint32_t policy_t
 
     read_lock(&ssid_list_rwlock);
 
-    for_each_acmssid( rawssid ) {
+    for_each_acmssid( rawssid )
+    {
         ssidref_t ssidref;
         void *s = GET_SSIDP(policy_type, rawssid);
 
-        if (policy_type == ACM_CHINESE_WALL_POLICY) {
+        if ( policy_type == ACM_CHINESE_WALL_POLICY )
+        {
             ssidref = ((struct chwall_ssid *)s)->chwall_ssidref;
         } else {
             ssidref = ((struct ste_ssid *)s)->ste_ssidref;
         }
         gdprintk(XENLOG_INFO,"domid=%d: search ssidref=%d, ssidref=%d\n",
                  rawssid->domainid,search_ssidref,ssidref);
-        if (ssidref == search_ssidref) {
+        if ( ssidref == search_ssidref )
+        {
             /* one is enough */
             acm_array_append_tuple(errors, ACM_SSIDREF_IN_USE, search_ssidref);
             rc = 1;
@@ -475,10 +505,13 @@ oldssid_to_newssid(const struct acm_ssid
 {
     uint i;
 
-    if (rawssid != NULL) {
+    if ( rawssid != NULL )
+    {
         ssidref_t ssid = rawssid->ssidref & 0xffff;
-        for (i = 0; i+1 < map->num_items; i += 2) {
-            if (map->array[i] == ssid) {
+        for ( i = 0; i + 1 < map->num_items; i += 2 )
+        {
+            if ( map->array[i] == ssid )
+            {
                 return (map->array[i+1] << 16 | map->array[i+1]);
             }
         }
@@ -491,7 +524,8 @@ oldssid_to_newssid(const struct acm_ssid
  * Assign an ssidref to the CHWALL policy component of the domain
  */
 static void
-acm_pri_policy_assign_ssidref(struct acm_ssid_domain *rawssid, ssidref_t new_ssid)
+acm_pri_policy_assign_ssidref(struct acm_ssid_domain *rawssid,
+                              ssidref_t new_ssid)
 {
     struct chwall_ssid *chwall = (struct chwall_ssid *)rawssid->primary_ssid;
     chwall->chwall_ssidref = new_ssid;
@@ -502,7 +536,8 @@ acm_pri_policy_assign_ssidref(struct acm
  * Assign an ssidref to the STE policy component of the domain
  */
 static void
-acm_sec_policy_assign_ssidref(struct acm_ssid_domain *rawssid, ssidref_t new_ssid)
+acm_sec_policy_assign_ssidref(struct acm_ssid_domain *rawssid,
+                              ssidref_t new_ssid)
 {
     struct ste_ssid *ste = (struct ste_ssid *)rawssid->secondary_ssid;
     ste->ste_ssidref = new_ssid;
@@ -521,13 +556,15 @@ acm_doms_change_ssidref(ssidref_t (*tran
 
     write_lock(&ssid_list_rwlock);
 
-    for_each_acmssid( rawssid ) {
+    for_each_acmssid( rawssid )
+    {
         ssidref_t new_ssid;
 
         rawssid->old_ssidref = rawssid->ssidref;
 
         new_ssid = translator_fn(rawssid, translation_map);
-        if (new_ssid == ACM_INVALID_SSIDREF) {
+        if ( new_ssid == ACM_INVALID_SSIDREF )
+        {
             /* means no mapping found, so no change -- old = new */
             continue;
         }
@@ -551,10 +588,11 @@ acm_doms_restore_ssidref(void)
 
     write_lock(&ssid_list_rwlock);
 
-    for_each_acmssid( rawssid ) {
+    for_each_acmssid( rawssid )
+    {
         ssidref_t old_ssid;
 
-        if (rawssid->old_ssidref == rawssid->ssidref)
+        if ( rawssid->old_ssidref == rawssid->ssidref )
             continue;
 
         old_ssid = rawssid->old_ssidref & 0xffff;
@@ -579,13 +617,15 @@ acm_check_deleted_ssidrefs(struct acm_si
     int rc = 0;
     uint idx;
     /* check for running domains that should not be there anymore */
-    for (idx = 0; idx < dels->num_items; idx++) {
-        if (acm_check_used_ssidref(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY,
-                                   dels->array[idx],
-                                   errors) > 0 ||
-            acm_check_used_ssidref(ACM_CHINESE_WALL_POLICY,
-                                   dels->array[idx],
-                                   errors) > 0) {
+    for ( idx = 0; idx < dels->num_items; idx++ )
+    {
+        if ( acm_check_used_ssidref(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY,
+                                    dels->array[idx],
+                                    errors) > 0 ||
+             acm_check_used_ssidref(ACM_CHINESE_WALL_POLICY,
+                                    dels->array[idx],
+                                    errors) > 0)
+        {
             rc = ACM_ERROR;
             break;
         }
@@ -602,46 +642,56 @@ acm_change_policy(struct acm_change_poli
 {
     int rc = 0;
     u8 *binpolicy = NULL;
-    struct acm_sized_buffer dels = {
+    struct acm_sized_buffer dels =
+    {
         .array = NULL,
     };
-    struct acm_sized_buffer ssidmap = {
+    struct acm_sized_buffer ssidmap =
+    {
         .array = NULL,
     };
-    struct acm_sized_buffer errors = {
+    struct acm_sized_buffer errors =
+    {
         .array = NULL,
     };
 
     gdprintk(XENLOG_INFO, "change policy operation\n");
 
-    if ((chgpolicy->delarray_size > 4096) ||
-        (chgpolicy->chgarray_size > 4096) ||
-        (chgpolicy->errarray_size > 4096)) {
+    if ( (chgpolicy->delarray_size > 4096) ||
+         (chgpolicy->chgarray_size > 4096) ||
+         (chgpolicy->errarray_size > 4096))
+    {
         return ACM_ERROR;
     }
 
     dels.num_items = chgpolicy->delarray_size / sizeof(uint32_t);
-    if (dels.num_items > 0) {
+    if ( dels.num_items > 0 )
+    {
         dels.array = xmalloc_array(uint32_t, dels.num_items);
-        if (dels.array == NULL) {
+        if ( dels.array == NULL )
+        {
             rc = -ENOMEM;
             goto acm_chg_policy_exit;
         }
     }
 
     ssidmap.num_items = chgpolicy->chgarray_size / sizeof(uint32_t);
-    if (ssidmap.num_items > 0) {
+    if ( ssidmap.num_items > 0 )
+    {
         ssidmap.array = xmalloc_array(uint32_t, ssidmap.num_items);
-        if (ssidmap.array == NULL) {
+        if ( ssidmap.array == NULL )
+        {
             rc = -ENOMEM;
             goto acm_chg_policy_exit;
         }
     }
 
     errors.num_items = chgpolicy->errarray_size / sizeof(uint32_t);
-    if (errors.num_items > 0) {
+    if ( errors.num_items > 0 )
+    {
         errors.array = xmalloc_array(uint32_t, errors.num_items);
-        if (errors.array == NULL) {
+        if ( errors.array == NULL )
+        {
             rc = -ENOMEM;
             goto acm_chg_policy_exit;
         }
@@ -650,7 +700,8 @@ acm_change_policy(struct acm_change_poli
 
     binpolicy = xmalloc_array(u8,
                               chgpolicy->policy_pushcache_size);
-    if (binpolicy == NULL) {
+    if ( binpolicy == NULL )
+    {
         rc = -ENOMEM;
         goto acm_chg_policy_exit;
     }
@@ -663,7 +714,8 @@ acm_change_policy(struct acm_change_poli
                          chgpolicy->chgarray_size) ||
          copy_from_guest(binpolicy,
                          chgpolicy->policy_pushcache,
-                         chgpolicy->policy_pushcache_size )) {
+                         chgpolicy->policy_pushcache_size ))
+    {
         rc = -EFAULT;
         goto acm_chg_policy_exit;
     }
@@ -676,7 +728,8 @@ acm_change_policy(struct acm_change_poli
     if ( (errors.num_items > 0) &&
          copy_to_guest(chgpolicy->err_array,
                        errors.array,
-                       errors.num_items ) ) {
+                       errors.num_items ) )
+    {
         rc = -EFAULT;
         goto acm_chg_policy_exit;
     }
@@ -703,10 +756,10 @@ domid_to_newssid(const struct acm_ssid_d
 {
     domid_t domid = rawssid->domainid;
     uint i;
-    for (i = 0; (i+1) < map->num_items; i += 2) {
-        if (map->array[i] == domid) {
+    for ( i = 0; (i+1) < map->num_items; i += 2 )
+    {
+        if ( map->array[i] == domid )
             return (ssidref_t)map->array[i+1];
-        }
     }
     return ACM_INVALID_SSIDREF;
 }
@@ -725,7 +778,8 @@ do_acm_relabel_doms(struct acm_sized_buf
     /* run tests; collect as much error info as possible */
     irc =  do_chwall_init_state_curr(errors);
     irc += do_ste_init_state_curr(errors);
-    if (irc != 0) {
+    if ( irc != 0 )
+    {
         rc = -EFAULT;
         goto acm_relabel_doms_lock_err_exit;
     }
@@ -749,30 +803,37 @@ acm_relabel_domains(struct acm_relabel_d
 acm_relabel_domains(struct acm_relabel_doms *relabel)
 {
     int rc = ACM_OK;
-    struct acm_sized_buffer relabels = {
+    struct acm_sized_buffer relabels =
+    {
         .array = NULL,
     };
-    struct acm_sized_buffer errors = {
+    struct acm_sized_buffer errors =
+    {
         .array = NULL,
     };
 
-    if (relabel->relabel_map_size > 4096) {
+    if ( relabel->relabel_map_size > 4096 )
+    {
         return ACM_ERROR;
     }
 
     relabels.num_items = relabel->relabel_map_size / sizeof(uint32_t);
-    if (relabels.num_items > 0) {
+    if ( relabels.num_items > 0 )
+    {
         relabels.array = xmalloc_array(uint32_t, relabels.num_items);
-        if (relabels.array == NULL) {
+        if ( relabels.array == NULL )
+        {
             rc = -ENOMEM;
             goto acm_relabel_doms_exit;
         }
     }
 
     errors.num_items = relabel->errarray_size / sizeof(uint32_t);
-    if (errors.num_items > 0) {
+    if ( errors.num_items > 0 )
+    {
         errors.array = xmalloc_array(uint32_t, errors.num_items);
-        if (errors.array == NULL) {
+        if ( errors.array == NULL )
+        {
             rc = -ENOMEM;
             goto acm_relabel_doms_exit;
         }
@@ -781,7 +842,8 @@ acm_relabel_domains(struct acm_relabel_d
 
     if ( copy_from_guest(relabels.array,
                          relabel->relabel_map,
-                         relabel->relabel_map_size) ) {
+                         relabel->relabel_map_size) )
+    {
         rc = -EFAULT;
         goto acm_relabel_doms_exit;
     }
@@ -790,10 +852,8 @@ acm_relabel_domains(struct acm_relabel_d
 
     if ( copy_to_guest(relabel->err_array,
                        errors.array,
-                       errors.num_items ) ) {
+                       errors.num_items ) )
         rc = -EFAULT;
-        goto acm_relabel_doms_exit;
-    }
 
 acm_relabel_doms_exit:
     xfree(relabels.array);
diff -r 4a8dbbc16d48 -r dae6a2790f6b xen/acm/acm_simple_type_enforcement_hooks.c
--- a/xen/acm/acm_simple_type_enforcement_hooks.c       Mon Jul 23 09:45:23 2007 +0100
+++ b/xen/acm/acm_simple_type_enforcement_hooks.c       Mon Jul 23 09:56:00 2007 +0100
@@ -40,9 +40,10 @@ struct ste_binary_policy ste_bin_pol;
 
 static inline int have_common_type (ssidref_t ref1, ssidref_t ref2) {
     int i;
-    for(i=0; i< ste_bin_pol.max_types; i++)
+    for( i = 0; i< ste_bin_pol.max_types; i++ )
         if ( ste_bin_pol.ssidrefs[ref1*ste_bin_pol.max_types + i] && 
-             ste_bin_pol.ssidrefs[ref2*ste_bin_pol.max_types + i]) {
+             ste_bin_pol.ssidrefs[ref2*ste_bin_pol.max_types + i])
+        {
             printkd("%s: common type #%02x.\n", __func__, i);
             return 1;
         }
@@ -55,17 +56,22 @@ static int share_common_type(struct doma
     ssidref_t ref_s, ref_o;
     int ret;
 
-    if ((subj == NULL) || (obj == NULL) || (subj->ssid == NULL) || (obj->ssid == NULL))
+    if ( (subj == NULL) || (obj == NULL) ||
+         (subj->ssid == NULL) || (obj->ssid == NULL) )
         return 0;
+
     read_lock(&acm_bin_pol_rwlock);
+
     /* lookup the policy-local ssids */
-    ref_s = ((struct ste_ssid *)(GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY, 
-                                           (struct acm_ssid_domain *)subj->ssid)))->ste_ssidref;
+    ref_s = ((struct ste_ssid *)(GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY,
+                       (struct acm_ssid_domain *)subj->ssid)))->ste_ssidref;
     ref_o = ((struct ste_ssid *)(GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY, 
-                                           (struct acm_ssid_domain *)obj->ssid)))->ste_ssidref;
+                       (struct acm_ssid_domain *)obj->ssid)))->ste_ssidref;
     /* check whether subj and obj share a common ste type */
     ret = have_common_type(ref_s, ref_o);
+
     read_unlock(&acm_bin_pol_rwlock);
+
     return ret;
 }
 
@@ -100,6 +106,7 @@ int acm_init_ste_policy(void)
     atomic_set(&(ste_bin_pol.gt_eval_count), 0);
     atomic_set(&(ste_bin_pol.gt_denied_count), 0);
     atomic_set(&(ste_bin_pol.gt_cachehit_count), 0);
+
     return ACM_OK;
 }
 
@@ -110,27 +117,29 @@ ste_init_domain_ssid(void **ste_ssid, ss
 {
     int i;
     struct ste_ssid *ste_ssidp = xmalloc(struct ste_ssid);
-    traceprintk("%s.\n", __func__);
-
-    if (ste_ssidp == NULL)
+
+    if ( ste_ssidp == NULL )
         return ACM_INIT_SSID_ERROR;
 
     /* get policy-local ssid reference */
-    ste_ssidp->ste_ssidref = GET_SSIDREF(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY, ssidref);
-    if ((ste_ssidp->ste_ssidref >= ste_bin_pol.max_ssidrefs) ||
-        (ste_ssidp->ste_ssidref == ACM_DEFAULT_LOCAL_SSID)) {
+    ste_ssidp->ste_ssidref = GET_SSIDREF(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY,
+                                         ssidref);
+
+    if ( (ste_ssidp->ste_ssidref >= ste_bin_pol.max_ssidrefs) )
+    {
         printkd("%s: ERROR ste_ssidref (%x) undefined or unset (0).\n",
                 __func__, ste_ssidp->ste_ssidref);
         xfree(ste_ssidp);
         return ACM_INIT_SSID_ERROR;
     }
     /* clean ste cache */
-    for (i=0; i<ACM_TE_CACHE_SIZE; i++)
+    for ( i = 0; i < ACM_TE_CACHE_SIZE; i++ )
         ste_ssidp->ste_cache[i].valid = ACM_STE_free;
 
     (*ste_ssid) = ste_ssidp;
     printkd("%s: determined ste_ssidref to %x.\n", 
             __func__, ste_ssidp->ste_ssidref);
+
     return ACM_OK;
 }
 
@@ -138,7 +147,6 @@ static void
 static void
 ste_free_domain_ssid(void *ste_ssid)
 {
-    traceprintk("%s.\n", __func__);
     xfree(ste_ssid);
     return;
 }
@@ -146,16 +154,18 @@ ste_free_domain_ssid(void *ste_ssid)
 /* dump type enforcement cache; policy read-locked already */
 static int 
 ste_dump_policy(u8 *buf, u32 buf_size) {
-    struct acm_ste_policy_buffer *ste_buf = (struct acm_ste_policy_buffer *)buf;
+    struct acm_ste_policy_buffer *ste_buf =
+                                  (struct acm_ste_policy_buffer *)buf;
     int ret = 0;
 
-    if (buf_size < sizeof(struct acm_ste_policy_buffer))
+    if ( buf_size < sizeof(struct acm_ste_policy_buffer) )
         return -EINVAL;
 
     ste_buf->ste_max_types = cpu_to_be32(ste_bin_pol.max_types);
     ste_buf->ste_max_ssidrefs = cpu_to_be32(ste_bin_pol.max_ssidrefs);
     ste_buf->policy_code = cpu_to_be32(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY);
-    ste_buf->ste_ssid_offset = cpu_to_be32(sizeof(struct acm_ste_policy_buffer));
+    ste_buf->ste_ssid_offset =
+                           cpu_to_be32(sizeof(struct acm_ste_policy_buffer));
     ret = be32_to_cpu(ste_buf->ste_ssid_offset) +
         sizeof(domaintype_t)*ste_bin_pol.max_ssidrefs*ste_bin_pol.max_types;
 
@@ -173,10 +183,12 @@ ste_dump_policy(u8 *buf, u32 buf_size) {
     return ret;
 }
 
-/* ste_init_state is called when a policy is changed to detect violations (return != 0).
- * from a security point of view, we simulate that all running domains are re-started and
- * all sharing decisions are replayed to detect violations or current sharing behavior
- * (right now: event_channels, future: also grant_tables)
+/*
+ * ste_init_state is called when a policy is changed to detect violations
+ * (return != 0). from a security point of view, we simulate that all
+ * running domains are re-started and all sharing decisions are replayed
+ * to detect violations or current sharing behavior (right now:
+ * event_channels, future: also grant_tables)
  */ 
 static int
 ste_init_state(struct acm_sized_buffer *errors)
@@ -191,27 +203,35 @@ ste_init_state(struct acm_sized_buffer *
 
     rcu_read_lock(&domlist_read_lock);
     read_lock(&ssid_list_rwlock);
-    /* go through all domains and adjust policy as if this domain was started now */
+
+    /* go through all domains and adjust policy as if this domain was
+       started now */
+
     for_each_domain ( d )
     {
         struct evtchn *ports;
         unsigned int bucket;
+
         ste_ssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY, 
                              (struct acm_ssid_domain *)d->ssid);
         ste_ssidref = ste_ssid->ste_ssidref;
         traceprintk("%s: validating policy for eventch domain %x 
(ste-Ref=%x).\n",
                     __func__, d->domain_id, ste_ssidref);
         /* a) check for event channel conflicts */
-        for (bucket = 0; bucket < NR_EVTCHN_BUCKETS; bucket++) {
+        for ( bucket = 0; bucket < NR_EVTCHN_BUCKETS; bucket++ )
+        {
             spin_lock(&d->evtchn_lock);
             ports = d->evtchn[bucket];
-            if (ports == NULL) {
+            if ( ports == NULL)
+            {
                 spin_unlock(&d->evtchn_lock);
                 break;
             }
 
-            for (port=0; port < EVTCHNS_PER_BUCKET; port++) {
-                if (ports[port].state == ECS_INTERDOMAIN) {
+            for ( port = 0; port < EVTCHNS_PER_BUCKET; port++ )
+            {
+                if ( ports[port].state == ECS_INTERDOMAIN )
+                {
                     rdom = ports[port].u.interdomain.remote_dom;
                     rdomid = rdom->domain_id;
                 } else {
@@ -227,7 +247,8 @@ ste_init_state(struct acm_sized_buffer *
                             __func__, d->domain_id, ste_ssidref,
                             rdom->domain_id, ste_rssidref, port);
                 /* check whether on subj->ssid, obj->ssid share a common type*/
-                if (!have_common_type(ste_ssidref, ste_rssidref)) {
+                if ( ! have_common_type(ste_ssidref, ste_rssidref) )
+                {
                     printkd("%s: Policy violation in event channel domain "
                             "%x -> domain %x.\n",
                             __func__, d->domain_id, rdomid);
@@ -245,7 +266,8 @@ ste_init_state(struct acm_sized_buffer *
 
         /* b) check for grant table conflicts on shared pages */
         spin_lock(&d->grant_table->lock);
-        for ( i = 0; i < nr_active_grant_frames(d->grant_table); i++ ) {
+        for ( i = 0; i < nr_active_grant_frames(d->grant_table); i++ )
+        {
 #define APP (PAGE_SIZE / sizeof(struct active_grant_entry))
             act = &d->grant_table->active[i/APP][i%APP];
             if ( act->pin != 0 ) {
@@ -254,7 +276,8 @@ ste_init_state(struct acm_sized_buffer *
                         __func__, d->domain_id, i, act->pin,
                         act->domid, (unsigned long)act->frame);
                 rdomid = act->domid;
-                if ((rdom = rcu_lock_domain_by_id(rdomid)) == NULL) {
+                if ( (rdom = rcu_lock_domain_by_id(rdomid)) == NULL )
+                {
                     spin_unlock(&d->grant_table->lock);
                     printkd("%s: domain not found ERROR!\n", __func__);
 
@@ -268,7 +291,8 @@ ste_init_state(struct acm_sized_buffer *
                                       (struct acm_ssid_domain *)(rdom->ssid));
                 ste_rssidref = ste_rssid->ste_ssidref;
                 rcu_unlock_domain(rdom);
-                if (!have_common_type(ste_ssidref, ste_rssidref)) {
+                if ( ! have_common_type(ste_ssidref, ste_rssidref) )
+                {
                     spin_unlock(&d->grant_table->lock);
                     printkd("%s: Policy violation in grant table "
                             "sharing domain %x -> domain %x.\n",
@@ -288,11 +312,14 @@ ste_init_state(struct acm_sized_buffer *
     read_unlock(&ssid_list_rwlock);
     rcu_read_unlock(&domlist_read_lock);
     return violation;
-    /* returning "violation != 0" means that existing sharing between domains would not
-     * have been allowed if the new policy had been enforced before the sharing; for ste,
-     * this means that there are at least 2 domains that have established sharing through
-     * event-channels or grant-tables but these two domains don't have no longer a common
-     * type in their typesets referenced by their ssidrefs */
+    /*
+       returning "violation != 0" means that existing sharing between domains
+       would not have been allowed if the new policy had been enforced before
+       the sharing; for ste, this means that there are at least 2 domains
+       that have established sharing through event-channels or grant-tables
+       but these two domains don't have no longer a common type in their
+       typesets referenced by their ssidrefs
+      */
 }
 
 
@@ -312,7 +339,8 @@ _ste_update_policy(u8 *buf, u32 buf_size
                    struct acm_sized_buffer *errors)
 {
     int rc = -EFAULT;
-    struct acm_ste_policy_buffer *ste_buf = (struct acm_ste_policy_buffer *)buf;
+    struct acm_ste_policy_buffer *ste_buf =
+                                 (struct acm_ste_policy_buffer *)buf;
     void *ssidrefsbuf;
     struct ste_ssid *ste_ssid;
     struct acm_ssid_domain *rawssid;
@@ -320,11 +348,17 @@ _ste_update_policy(u8 *buf, u32 buf_size
 
 
     /* 1. create and copy-in new ssidrefs buffer */
-    ssidrefsbuf = xmalloc_array(u8, sizeof(domaintype_t)*ste_buf->ste_max_types*ste_buf->ste_max_ssidrefs);
-    if (ssidrefsbuf == NULL) {
+    ssidrefsbuf = xmalloc_array(u8,
+                                sizeof(domaintype_t) *
+                                 ste_buf->ste_max_types *
+                                 ste_buf->ste_max_ssidrefs);
+    if ( ssidrefsbuf == NULL ) {
         return -ENOMEM;
     }
-    if (ste_buf->ste_ssid_offset + sizeof(domaintype_t) * ste_buf->ste_max_ssidrefs*ste_buf->ste_max_types > buf_size)
+    if ( ste_buf->ste_ssid_offset +
+         sizeof(domaintype_t) *
+         ste_buf->ste_max_ssidrefs *
+         ste_buf->ste_max_types > buf_size )
         goto error_free;
 
     arrcpy(ssidrefsbuf, 
@@ -333,18 +367,23 @@ _ste_update_policy(u8 *buf, u32 buf_size
            ste_buf->ste_max_ssidrefs*ste_buf->ste_max_types);
 
 
-    /* 3. in test mode: re-calculate sharing decisions based on running domains;
-     *    this can fail if new policy is conflicting with sharing of running domains 
-     *    now: reject violating new policy; future: adjust sharing through revoking sharing */
-
-    if (test_only) {
+    /*
+     * 3. in test mode: re-calculate sharing decisions based on running
+     *    domains; this can fail if new policy is conflicting with sharing
+     *    of running domains
+     *    now: reject violating new policy; future: adjust sharing through
+     *    revoking sharing
+     */
+
+    if ( test_only ) {
         /* temporarily replace old policy with new one for the testing */
         struct ste_binary_policy orig_ste_bin_pol = ste_bin_pol;
         ste_bin_pol.max_types = ste_buf->ste_max_types;
         ste_bin_pol.max_ssidrefs = ste_buf->ste_max_ssidrefs;
         ste_bin_pol.ssidrefs = (domaintype_t *)ssidrefsbuf;
 
-        if (ste_init_state(NULL)) {
+        if ( ste_init_state(NULL) )
+        {
             /* new policy conflicts with sharing of running domains */
             printk("%s: New policy conflicts with running domains. "
                    "Policy load aborted.\n", __func__);
@@ -365,9 +404,10 @@ _ste_update_policy(u8 *buf, u32 buf_size
     /* clear all ste caches */
     read_lock(&ssid_list_rwlock);
 
-    for_each_acmssid( rawssid ) {
+    for_each_acmssid( rawssid )
+    {
         ste_ssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY, rawssid);
-        for (i=0; i<ACM_TE_CACHE_SIZE; i++)
+        for ( i = 0; i < ACM_TE_CACHE_SIZE; i++ )
             ste_ssid->ste_cache[i].valid = ACM_STE_free;
     }
 
@@ -376,7 +416,8 @@ _ste_update_policy(u8 *buf, u32 buf_size
     return ACM_OK;
 
  error_free:
-    if (!test_only) printk("%s: ERROR setting policy.\n", __func__);
+    if ( !test_only )
+        printk("%s: ERROR setting policy.\n", __func__);
     xfree(ssidrefsbuf);
     return rc;
 }
@@ -388,7 +429,7 @@ ste_test_policy(u8 *buf, u32 buf_size, i
     struct acm_ste_policy_buffer *ste_buf =
              (struct acm_ste_policy_buffer *)buf;
 
-    if (buf_size < sizeof(struct acm_ste_policy_buffer))
+    if ( buf_size < sizeof(struct acm_ste_policy_buffer) )
         return -EINVAL;
 
     /* Convert endianess of policy */
@@ -399,12 +440,12 @@ ste_test_policy(u8 *buf, u32 buf_size, i
     ste_buf->ste_ssid_offset = be32_to_cpu(ste_buf->ste_ssid_offset);
 
     /* policy type and version checks */
-    if ((ste_buf->policy_code != ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY) ||
-        (ste_buf->policy_version != ACM_STE_VERSION))
+    if ( (ste_buf->policy_code != ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY) ||
+         (ste_buf->policy_version != ACM_STE_VERSION) )
         return -EINVAL;
 
     /* during boot dom0_chwall_ssidref is set */
-    if (is_bootpolicy && (dom0_ste_ssidref >= ste_buf->ste_max_ssidrefs))
+    if ( is_bootpolicy && (dom0_ste_ssidref >= ste_buf->ste_max_ssidrefs) )
         return -EINVAL;
 
     return _ste_update_policy(buf, buf_size, 1, errors);
@@ -422,17 +463,24 @@ ste_dump_stats(u8 *buf, u16 buf_len)
     struct acm_ste_stats_buffer stats;
 
     /* now send the hook counts to user space */
-    stats.ec_eval_count = cpu_to_be32(atomic_read(&ste_bin_pol.ec_eval_count));
-    stats.gt_eval_count = cpu_to_be32(atomic_read(&ste_bin_pol.gt_eval_count));
-    stats.ec_denied_count = cpu_to_be32(atomic_read(&ste_bin_pol.ec_denied_count));
-    stats.gt_denied_count = cpu_to_be32(atomic_read(&ste_bin_pol.gt_denied_count));
-    stats.ec_cachehit_count = cpu_to_be32(atomic_read(&ste_bin_pol.ec_cachehit_count));
-    stats.gt_cachehit_count = cpu_to_be32(atomic_read(&ste_bin_pol.gt_cachehit_count));
-
-    if (buf_len < sizeof(struct acm_ste_stats_buffer))
+    stats.ec_eval_count =
+                    cpu_to_be32(atomic_read(&ste_bin_pol.ec_eval_count));
+    stats.gt_eval_count =
+                    cpu_to_be32(atomic_read(&ste_bin_pol.gt_eval_count));
+    stats.ec_denied_count =
+                    cpu_to_be32(atomic_read(&ste_bin_pol.ec_denied_count));
+    stats.gt_denied_count =
+                    cpu_to_be32(atomic_read(&ste_bin_pol.gt_denied_count));
+    stats.ec_cachehit_count =
+                    cpu_to_be32(atomic_read(&ste_bin_pol.ec_cachehit_count));
+    stats.gt_cachehit_count =
+                    cpu_to_be32(atomic_read(&ste_bin_pol.gt_cachehit_count));
+
+    if ( buf_len < sizeof(struct acm_ste_stats_buffer) )
         return -ENOMEM;
 
     memcpy(buf, &stats, sizeof(struct acm_ste_stats_buffer));
+
     return sizeof(struct acm_ste_stats_buffer);
 }
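
Since the counters above are converted with cpu_to_be32() before being copied
into the caller's buffer, a consumer has to convert them back to host byte
order. A minimal standalone sketch of decoding such a buffer; the struct layout
and the use of ntohl() as a be32-to-host stand-in are assumptions for
illustration, not the actual tools interface.

    #include <arpa/inet.h>   /* ntohl()/htonl() as big-endian helpers */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct ste_stats_wire {
        uint32_t ec_eval_count;
        uint32_t gt_eval_count;
        uint32_t ec_denied_count;
        uint32_t gt_denied_count;
        uint32_t ec_cachehit_count;
        uint32_t gt_cachehit_count;
    };

    int main(void)
    {
        /* pretend this arrived from the stats buffer */
        struct ste_stats_wire wire;
        memset(&wire, 0, sizeof(wire));
        wire.ec_eval_count = htonl(42);

        printf("ec_eval_count = %u\n", (unsigned)ntohl(wire.ec_eval_count));
        return 0;
    }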
 
@@ -442,14 +490,15 @@ ste_dump_ssid_types(ssidref_t ssidref, u
     int i;
 
     /* fill in buffer */
-    if (ste_bin_pol.max_types > len)
+    if ( ste_bin_pol.max_types > len )
         return -EFAULT;
 
-    if (ssidref >= ste_bin_pol.max_ssidrefs)
+    if ( ssidref >= ste_bin_pol.max_ssidrefs )
         return -EFAULT;
 
     /* read types for chwall ssidref */
-    for(i=0; i< ste_bin_pol.max_types; i++) {
+    for( i = 0; i< ste_bin_pol.max_types; i++ )
+    {
         if (ste_bin_pol.ssidrefs[ssidref * ste_bin_pol.max_types + i])
             buf[i] = 1;
         else
@@ -461,7 +510,8 @@ ste_dump_ssid_types(ssidref_t ssidref, u
 /* we need to go through this before calling the hooks,
  * returns 1 == cache hit */
 static int inline
-check_cache(struct domain *dom, domid_t rdom) {
+check_cache(struct domain *dom, domid_t rdom)
+{
     struct ste_ssid *ste_ssid;
     int i;
 
@@ -472,10 +522,14 @@ check_cache(struct domain *dom, domid_t 
     ste_ssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY, 
                          (struct acm_ssid_domain *)(dom->ssid));
 
-    for(i=0; i< ACM_TE_CACHE_SIZE; i++) {
-        if ((ste_ssid->ste_cache[i].valid == ACM_STE_valid) &&
-            (ste_ssid->ste_cache[i].id == rdom)) {
-            printkd("cache hit (entry %x, id= %x!\n", i, 
ste_ssid->ste_cache[i].id);
+    for( i = 0; i < ACM_TE_CACHE_SIZE; i++ )
+    {
+        if ( (ste_ssid->ste_cache[i].valid == ACM_STE_valid) &&
+             (ste_ssid->ste_cache[i].id == rdom) )
+        {
+            printkd("cache hit (entry %x, id= %x!\n",
+                    i,
+                    ste_ssid->ste_cache[i].id);
             return 1;
         }
     }
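
check_cache() above is a small fixed-size decision cache consulted before the
policy is re-evaluated on the hook paths. A minimal standalone sketch of that
lookup/store pattern; the sizes, names and main() driver are invented for the
example, not the ACM structures.

    #include <stdio.h>

    #define CACHE_SIZE 8

    enum entry_state { ENTRY_FREE, ENTRY_VALID };

    struct cache_entry {
        enum entry_state state;
        unsigned int peer_id;
    };

    static struct cache_entry cache[CACHE_SIZE];

    /* returns 1 on a hit, 0 on a miss */
    static int cache_lookup(unsigned int peer_id)
    {
        int i;
        for (i = 0; i < CACHE_SIZE; i++)
            if (cache[i].state == ENTRY_VALID && cache[i].peer_id == peer_id)
                return 1;
        return 0;
    }

    /* record a permitted decision in the first free slot (drop it if full) */
    static void cache_store(unsigned int peer_id)
    {
        int i;
        for (i = 0; i < CACHE_SIZE; i++)
            if (cache[i].state == ENTRY_FREE) {
                cache[i].state = ENTRY_VALID;
                cache[i].peer_id = peer_id;
                return;
            }
    }

    int main(void)
    {
        cache_store(3);
        printf("lookup 3: %d, lookup 5: %d\n", cache_lookup(3), cache_lookup(5));
        return 0;
    }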
@@ -488,15 +542,21 @@ cache_result(struct domain *subj, struct
 cache_result(struct domain *subj, struct domain *obj) {
     struct ste_ssid *ste_ssid;
     int i;
-    printkd("caching from doms: %x --> %x.\n", subj->domain_id, 
obj->domain_id);
-    if (subj->ssid == NULL)
+
+    printkd("caching from doms: %x --> %x.\n",
+            subj->domain_id, obj->domain_id);
+
+    if ( subj->ssid == NULL )
         return;
+
     ste_ssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY, 
                          (struct acm_ssid_domain *)(subj)->ssid);
-    for(i=0; i< ACM_TE_CACHE_SIZE; i++)
-        if (ste_ssid->ste_cache[i].valid == ACM_STE_free)
+
+    for( i = 0; i < ACM_TE_CACHE_SIZE; i++ )
+        if ( ste_ssid->ste_cache[i].valid == ACM_STE_free )
             break;
-    if (i< ACM_TE_CACHE_SIZE) {
+    if ( i < ACM_TE_CACHE_SIZE )
+    {
         ste_ssid->ste_cache[i].valid = ACM_STE_valid;
         ste_ssid->ste_cache[i].id = obj->domain_id;
     } else
@@ -512,20 +572,23 @@ clean_id_from_cache(domid_t id)
     struct acm_ssid_domain *rawssid;
 
     printkd("deleting cache for dom %x.\n", id);
+
     read_lock(&ssid_list_rwlock);
     /* look through caches of all domains */
 
-    for_each_acmssid ( rawssid ) {
-
+    for_each_acmssid ( rawssid )
+    {
         ste_ssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY, rawssid);
-        if (!ste_ssid) {
+
+        if ( !ste_ssid )
+        {
             printk("%s: deleting ID from cache ERROR (no ste_ssid)!\n",
                    __func__);
             goto out;
         }
-        for (i=0; i<ACM_TE_CACHE_SIZE; i++)
-            if ((ste_ssid->ste_cache[i].valid == ACM_STE_valid) &&
-                (ste_ssid->ste_cache[i].id == id))
+        for ( i = 0; i < ACM_TE_CACHE_SIZE; i++ )
+            if ( (ste_ssid->ste_cache[i].valid == ACM_STE_valid) &&
+                 (ste_ssid->ste_cache[i].id == id) )
                 ste_ssid->ste_cache[i].valid = ACM_STE_free;
     }
 
@@ -544,19 +607,19 @@ ste_pre_domain_create(void *subject_ssid
     traceprintk("%s.\n", __func__);
 
     read_lock(&acm_bin_pol_rwlock);
+
     ste_ssidref = GET_SSIDREF(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY, ssidref);
-    if (ste_ssidref == ACM_DEFAULT_LOCAL_SSID) {
-        printk("%s: ERROR STE SSID is NOT SET but policy enforced.\n", 
__func__);
-        read_unlock(&acm_bin_pol_rwlock);
-        return ACM_ACCESS_DENIED; /* catching and indicating config error */
-    }
-    if (ste_ssidref >= ste_bin_pol.max_ssidrefs) {
+
+    if ( ste_ssidref >= ste_bin_pol.max_ssidrefs )
+    {
         printk("%s: ERROR ste_ssidref > max(%x).\n", 
                __func__, ste_bin_pol.max_ssidrefs-1);
         read_unlock(&acm_bin_pol_rwlock);
         return ACM_ACCESS_DENIED;
     }
+
     read_unlock(&acm_bin_pol_rwlock);
+
     return ACM_ACCESS_PERMITTED;
 }
 
@@ -583,34 +646,42 @@ ste_pre_eventchannel_unbound(domid_t id1
                 (id1 == DOMID_SELF) ? current->domain->domain_id : id1,
                 (id2 == DOMID_SELF) ? current->domain->domain_id : id2);
 
-    if (id1 == DOMID_SELF) id1 = current->domain->domain_id;
-    if (id2 == DOMID_SELF) id2 = current->domain->domain_id;
+    if ( id1 == DOMID_SELF )
+        id1 = current->domain->domain_id;
+    if ( id2 == DOMID_SELF )
+        id2 = current->domain->domain_id;
 
     subj = rcu_lock_domain_by_id(id1);
     obj  = rcu_lock_domain_by_id(id2);
-    if ((subj == NULL) || (obj == NULL)) {
+    if ( (subj == NULL) || (obj == NULL) )
+    {
         ret = ACM_ACCESS_DENIED;
         goto out;
     }
     /* cache check late */
-    if (check_cache(subj, obj->domain_id)) {
+    if ( check_cache(subj, obj->domain_id) )
+    {
         atomic_inc(&ste_bin_pol.ec_cachehit_count);
         ret = ACM_ACCESS_PERMITTED;
         goto out;
     }
     atomic_inc(&ste_bin_pol.ec_eval_count);
 
-    if (share_common_type(subj, obj)) {
+    if ( share_common_type(subj, obj) )
+    {
         cache_result(subj, obj);
         ret = ACM_ACCESS_PERMITTED;
-    } else {
+    }
+    else
+    {
         atomic_inc(&ste_bin_pol.ec_denied_count);
         ret = ACM_ACCESS_DENIED;
     }
+
   out:
-    if (obj != NULL)
+    if ( obj != NULL )
         rcu_unlock_domain(obj);
-    if (subj != NULL)
+    if ( subj != NULL )
         rcu_unlock_domain(subj);
     return ret;
 }
@@ -628,17 +699,20 @@ ste_pre_eventchannel_interdomain(domid_t
     /* following is a bit longer but ensures that we
      * "put" only domains that we where "find"-ing 
      */
-    if (id == DOMID_SELF) id = current->domain->domain_id;
+    if ( id == DOMID_SELF )
+        id = current->domain->domain_id;
 
     subj = current->domain;
     obj  = rcu_lock_domain_by_id(id);
-    if (obj == NULL) {
+    if ( obj == NULL )
+    {
         ret = ACM_ACCESS_DENIED;
         goto out;
     }
 
     /* cache check late, but evtchn is not on performance critical path */
-    if (check_cache(subj, obj->domain_id)) {
+    if ( check_cache(subj, obj->domain_id) )
+    {
         atomic_inc(&ste_bin_pol.ec_cachehit_count);
         ret = ACM_ACCESS_PERMITTED;
         goto out;
@@ -646,15 +720,19 @@ ste_pre_eventchannel_interdomain(domid_t
 
     atomic_inc(&ste_bin_pol.ec_eval_count);
 
-    if (share_common_type(subj, obj)) {
+    if ( share_common_type(subj, obj) )
+    {
         cache_result(subj, obj);
         ret = ACM_ACCESS_PERMITTED;
-    } else {
+    }
+    else
+    {
         atomic_inc(&ste_bin_pol.ec_denied_count);
         ret = ACM_ACCESS_DENIED;
     }
+
  out:
-    if (obj != NULL)
+    if ( obj != NULL )
         rcu_unlock_domain(obj);
     return ret;
 }
@@ -662,13 +740,15 @@ ste_pre_eventchannel_interdomain(domid_t
 /* -------- SHARED MEMORY OPERATIONS -----------*/
 
 static int
-ste_pre_grant_map_ref (domid_t id) {
+ste_pre_grant_map_ref (domid_t id)
+{
     struct domain *obj, *subj;
     int ret;
     traceprintk("%s: dom%x-->dom%x.\n", __func__,
                 current->domain->domain_id, id);
 
-    if (check_cache(current->domain, id)) {
+    if ( check_cache(current->domain, id) )
+    {
         atomic_inc(&ste_bin_pol.gt_cachehit_count);
         return ACM_ACCESS_PERMITTED;
     }
@@ -676,15 +756,18 @@ ste_pre_grant_map_ref (domid_t id) {
     subj = current->domain;
     obj = rcu_lock_domain_by_id(id);
 
-    if (share_common_type(subj, obj)) {
+    if ( share_common_type(subj, obj) )
+    {
         cache_result(subj, obj);
         ret = ACM_ACCESS_PERMITTED;
-    } else {
+    }
+    else
+    {
         atomic_inc(&ste_bin_pol.gt_denied_count);
         printkd("%s: ACCESS DENIED!\n", __func__);
         ret = ACM_ACCESS_DENIED;
     }
-    if (obj != NULL)
+    if ( obj != NULL )
         rcu_unlock_domain(obj);
     return ret;
 }
@@ -694,34 +777,41 @@ ste_pre_grant_map_ref (domid_t id) {
    flow from the creating domain to the domain that is setup, we 
    check types in addition to the general authorization */
 static int
-ste_pre_grant_setup (domid_t id) {
+ste_pre_grant_setup (domid_t id)
+{
     struct domain *obj, *subj;
     int ret;
     traceprintk("%s: dom%x-->dom%x.\n", __func__,
                 current->domain->domain_id, id);
 
-    if (check_cache(current->domain, id)) {
+    if ( check_cache(current->domain, id) )
+    {
         atomic_inc(&ste_bin_pol.gt_cachehit_count);
         return ACM_ACCESS_PERMITTED;
     }
     atomic_inc(&ste_bin_pol.gt_eval_count);
     /* a) check authorization (eventually use specific capabilities) */
-    if (!IS_PRIV(current->domain)) {
-        printk("%s: Grant table management authorization denied ERROR!\n", 
__func__);
+    if ( !IS_PRIV(current->domain) )
+    {
+        printk("%s: Grant table management authorization denied ERROR!\n",
+               __func__);
         return ACM_ACCESS_DENIED;
     }
     /* b) check types */
     subj = current->domain;
     obj = rcu_lock_domain_by_id(id);
 
-    if (share_common_type(subj, obj)) {
+    if ( share_common_type(subj, obj) )
+    {
         cache_result(subj, obj);
         ret = ACM_ACCESS_PERMITTED;
-    } else {
+    }
+    else
+    {
         atomic_inc(&ste_bin_pol.gt_denied_count);
         ret = ACM_ACCESS_DENIED;
     }
-    if (obj != NULL)
+    if ( obj != NULL )
         rcu_unlock_domain(obj);
     return ret;
 }
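
ste_pre_grant_setup() above layers two checks: a privilege check first, then the
type-based check. A minimal standalone sketch of that ordering; is_privileged()
and shares_type() are stand-ins for illustration, not the IS_PRIV() and
share_common_type() implementations.

    #include <stdbool.h>
    #include <stdio.h>

    static bool is_privileged(int dom)    { return dom == 0; }
    static bool shares_type(int a, int b) { return (a % 2) == (b % 2); }

    /* returns 1 if the grant-table setup would be permitted, else 0 */
    static int grant_setup_allowed(int subject, int object)
    {
        if (!is_privileged(subject))                   /* a) authorization */
            return 0;
        return shares_type(subject, object) ? 1 : 0;   /* b) types */
    }

    int main(void)
    {
        printf("dom0 -> dom2: %d\n", grant_setup_allowed(0, 2));
        printf("dom1 -> dom2: %d\n", grant_setup_allowed(1, 2));
        return 0;
    }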
@@ -729,46 +819,42 @@ ste_pre_grant_setup (domid_t id) {
 /* -------- DOMAIN-Requested Decision hooks -----------*/
 
 static int
-ste_sharing(ssidref_t ssidref1, ssidref_t ssidref2) {
-    if (have_common_type (
+ste_sharing(ssidref_t ssidref1, ssidref_t ssidref2)
+{
+    int hct = have_common_type(
         GET_SSIDREF(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY, ssidref1),
-        GET_SSIDREF(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY, ssidref2)
-        ))
-        return ACM_ACCESS_PERMITTED;
-    else
-        return ACM_ACCESS_DENIED;
-}
-
-/* */
+        GET_SSIDREF(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY, ssidref2));
+    return (hct ? ACM_ACCESS_PERMITTED : ACM_ACCESS_DENIED);
+}
 
 static int
 ste_is_default_policy(void)
 {
-    return ( (ste_bin_pol.max_types    == 1) &&
-             (ste_bin_pol.max_ssidrefs == 2) );
+    return ((ste_bin_pol.max_types    == 1) &&
+            (ste_bin_pol.max_ssidrefs == 2));
 }
 
 /* now define the hook structure similarly to LSM */
 struct acm_operations acm_simple_type_enforcement_ops = {
 
     /* policy management services */
-    .init_domain_ssid  = ste_init_domain_ssid,
-    .free_domain_ssid  = ste_free_domain_ssid,
+    .init_domain_ssid       = ste_init_domain_ssid,
+    .free_domain_ssid       = ste_free_domain_ssid,
     .dump_binary_policy     = ste_dump_policy,
     .test_binary_policy     = ste_test_policy,
     .set_binary_policy      = ste_set_policy,
-    .dump_statistics  = ste_dump_stats,
+    .dump_statistics        = ste_dump_stats,
     .dump_ssid_types        = ste_dump_ssid_types,
 
     /* domain management control hooks */
-    .domain_create = ste_domain_create,
-    .domain_destroy    = ste_domain_destroy,
+    .domain_create          = ste_domain_create,
+    .domain_destroy         = ste_domain_destroy,
 
     /* event channel control hooks */
-    .pre_eventchannel_unbound   = ste_pre_eventchannel_unbound,
+    .pre_eventchannel_unbound = ste_pre_eventchannel_unbound,
     .fail_eventchannel_unbound = NULL,
     .pre_eventchannel_interdomain = ste_pre_eventchannel_interdomain,
-    .fail_eventchannel_interdomain  = NULL,
+    .fail_eventchannel_interdomain = NULL,
 
     /* grant table control hooks */
     .pre_grant_map_ref      = ste_pre_grant_map_ref,

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 

