[Xen-changelog] Merged.



# HG changeset patch
# User emellor@xxxxxxxxxxxxxxxxxxxxxx
# Node ID f31494465fb07d9f790716683d7b92eab4ac8835
# Parent  19af31a595377b48400441d8235ebbaff670e18a
# Parent  c3a0f492644cf3288e4144d85d73efc831d7333f
Merged.

diff -r 19af31a59537 -r f31494465fb0 tools/ioemu/exec-all.h
--- a/tools/ioemu/exec-all.h    Fri Oct 21 11:06:17 2005
+++ b/tools/ioemu/exec-all.h    Fri Oct 21 11:07:14 2005
@@ -573,7 +573,7 @@
 }
 #endif
 
-#define DEBUG_UNUSED_IOPORT
-#define DEBUG_IOPORT
+//#define DEBUG_UNUSED_IOPORT
+//#define DEBUG_IOPORT
 #define TARGET_VMX
 
diff -r 19af31a59537 -r f31494465fb0 tools/ioemu/hw/cirrus_vga_rop2.h
--- a/tools/ioemu/hw/cirrus_vga_rop2.h  Fri Oct 21 11:06:17 2005
+++ b/tools/ioemu/hw/cirrus_vga_rop2.h  Fri Oct 21 11:07:14 2005
@@ -47,6 +47,11 @@
     int x, y, pattern_y, pattern_pitch, pattern_x;
     unsigned int col;
     const uint8_t *src1;
+#if DEPTH == 24
+    int skipleft = s->gr[0x2f] & 0x1f;
+#else
+    int skipleft = (s->gr[0x2f] & 0x07) * (DEPTH / 8);
+#endif
 
 #if DEPTH == 8
     pattern_pitch = 8;
@@ -56,11 +61,11 @@
     pattern_pitch = 32;
 #endif
     pattern_y = s->cirrus_blt_srcaddr & 7;
-    pattern_x = 0;
-    for(y = 0; y < bltheight; y++) {
-        d = dst;
+    for(y = 0; y < bltheight; y++) {
+        pattern_x = skipleft;
+        d = dst + skipleft;
         src1 = src + pattern_y * pattern_pitch;
-        for (x = 0; x < bltwidth; x += (DEPTH / 8)) {
+        for (x = skipleft; x < bltwidth; x += (DEPTH / 8)) {
 #if DEPTH == 8
             col = src1[pattern_x];
             pattern_x = (pattern_x + 1) & 7;
@@ -99,7 +104,13 @@
     unsigned int col;
     unsigned bitmask;
     unsigned index;
-    int srcskipleft = 0;
+#if DEPTH == 24
+    int dstskipleft = s->gr[0x2f] & 0x1f;
+    int srcskipleft = dstskipleft / 3;
+#else
+    int srcskipleft = s->gr[0x2f] & 0x07;
+    int dstskipleft = srcskipleft * (DEPTH / 8);
+#endif
 
     if (s->cirrus_blt_modeext & CIRRUS_BLTMODEEXT_COLOREXPINV) {
         bits_xor = 0xff;
@@ -112,8 +123,8 @@
     for(y = 0; y < bltheight; y++) {
         bitmask = 0x80 >> srcskipleft;
         bits = *src++ ^ bits_xor;
-        d = dst;
-        for (x = 0; x < bltwidth; x += (DEPTH / 8)) {
+        d = dst + dstskipleft;
+        for (x = dstskipleft; x < bltwidth; x += (DEPTH / 8)) {
             if ((bitmask & 0xff) == 0) {
                 bitmask = 0x80;
                 bits = *src++ ^ bits_xor;
@@ -142,15 +153,16 @@
     unsigned bits;
     unsigned int col;
     unsigned bitmask;
-    int srcskipleft = 0;
+    int srcskipleft = s->gr[0x2f] & 0x07;
+    int dstskipleft = srcskipleft * (DEPTH / 8);
 
     colors[0] = s->cirrus_blt_bgcol;
     colors[1] = s->cirrus_blt_fgcol;
     for(y = 0; y < bltheight; y++) {
         bitmask = 0x80 >> srcskipleft;
         bits = *src++;
-        d = dst;
-        for (x = 0; x < bltwidth; x += (DEPTH / 8)) {
+        d = dst + dstskipleft;
+        for (x = dstskipleft; x < bltwidth; x += (DEPTH / 8)) {
             if ((bitmask & 0xff) == 0) {
                 bitmask = 0x80;
                 bits = *src++;
@@ -175,6 +187,13 @@
     int x, y, bitpos, pattern_y;
     unsigned int bits, bits_xor;
     unsigned int col;
+#if DEPTH == 24
+    int dstskipleft = s->gr[0x2f] & 0x1f;
+    int srcskipleft = dstskipleft / 3;
+#else
+    int srcskipleft = s->gr[0x2f] & 0x07;
+    int dstskipleft = srcskipleft * (DEPTH / 8);
+#endif
 
     if (s->cirrus_blt_modeext & CIRRUS_BLTMODEEXT_COLOREXPINV) {
         bits_xor = 0xff;
@@ -187,9 +206,9 @@
 
     for(y = 0; y < bltheight; y++) {
         bits = src[pattern_y] ^ bits_xor;
-        bitpos = 7;
-        d = dst;
-        for (x = 0; x < bltwidth; x += (DEPTH / 8)) {
+        bitpos = 7 - srcskipleft;
+        d = dst + dstskipleft;
+        for (x = dstskipleft; x < bltwidth; x += (DEPTH / 8)) {
             if ((bits >> bitpos) & 1) {
                 PUTPIXEL();
             }
@@ -213,6 +232,8 @@
     int x, y, bitpos, pattern_y;
     unsigned int bits;
     unsigned int col;
+    int srcskipleft = s->gr[0x2f] & 0x07;
+    int dstskipleft = srcskipleft * (DEPTH / 8);
 
     colors[0] = s->cirrus_blt_bgcol;
     colors[1] = s->cirrus_blt_fgcol;
@@ -220,9 +241,9 @@
 
     for(y = 0; y < bltheight; y++) {
         bits = src[pattern_y];
-        bitpos = 7;
-        d = dst;
-        for (x = 0; x < bltwidth; x += (DEPTH / 8)) {
+        bitpos = 7 - srcskipleft;
+        d = dst + dstskipleft;
+        for (x = dstskipleft; x < bltwidth; x += (DEPTH / 8)) {
             col = colors[(bits >> bitpos) & 1];
             PUTPIXEL();
             d += (DEPTH / 8);
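
For reference, a minimal standalone sketch (not part of the patch) of the left-edge
skip computation the cirrus_vga_rop2.h hunks above introduce. The helper name
blt_dst_skip, the example register value, and the printout are illustrative
assumptions inferred from the masks: at 24 bpp the low 5 bits of the BLT mode
extension register (s->gr[0x2f]) appear to be used as a byte offset, while at other
depths the low 3 bits give a pixel count scaled by the pixel size in bytes.

    #include <stdio.h>
    #include <stdint.h>

    /* Hypothetical helper mirroring the skipleft logic in the patch. */
    static int blt_dst_skip(uint8_t gr2f, int depth)
    {
        if (depth == 24)
            return gr2f & 0x1f;             /* byte offset, 0..31 */
        return (gr2f & 0x07) * (depth / 8); /* pixels scaled to bytes */
    }

    int main(void)
    {
        uint8_t gr2f = 0x05;                /* example register value */
        int depths[] = { 8, 16, 24, 32 };
        int i;
        for (i = 0; i < 4; i++)
            printf("depth %2d: skip %d bytes\n",
                   depths[i], blt_dst_skip(gr2f, depths[i]));
        return 0;
    }
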
diff -r 19af31a59537 -r f31494465fb0 tools/ioemu/hw/i8259.c
--- a/tools/ioemu/hw/i8259.c    Fri Oct 21 11:06:17 2005
+++ b/tools/ioemu/hw/i8259.c    Fri Oct 21 11:07:14 2005
@@ -29,7 +29,7 @@
 //#define DEBUG_PIC
 
 //#define DEBUG_IRQ_LATENCY
-#define DEBUG_IRQ_COUNT
+//#define DEBUG_IRQ_COUNT
 
 extern void pit_reset_vmx_vectors();
 
diff -r 19af31a59537 -r f31494465fb0 tools/ioemu/monitor.c
--- a/tools/ioemu/monitor.c     Fri Oct 21 11:06:17 2005
+++ b/tools/ioemu/monitor.c     Fri Oct 21 11:07:14 2005
@@ -232,6 +232,161 @@
     exit(0);
 }
 
+typedef struct {
+    int keycode;
+    const char *name;
+} KeyDef;
+
+static const KeyDef key_defs[] = {
+    { 0x2a, "shift" },
+    { 0x36, "shift_r" },
+    
+    { 0x38, "alt" },
+    { 0xb8, "alt_r" },
+    { 0x1d, "ctrl" },
+    { 0x9d, "ctrl_r" },
+
+    { 0xdd, "menu" },
+
+    { 0x01, "esc" },
+
+    { 0x02, "1" },
+    { 0x03, "2" },
+    { 0x04, "3" },
+    { 0x05, "4" },
+    { 0x06, "5" },
+    { 0x07, "6" },
+    { 0x08, "7" },
+    { 0x09, "8" },
+    { 0x0a, "9" },
+    { 0x0b, "0" },
+    { 0x0e, "backspace" },
+
+    { 0x0f, "tab" },
+    { 0x10, "q" },
+    { 0x11, "w" },
+    { 0x12, "e" },
+    { 0x13, "r" },
+    { 0x14, "t" },
+    { 0x15, "y" },
+    { 0x16, "u" },
+    { 0x17, "i" },
+    { 0x18, "o" },
+    { 0x19, "p" },
+
+    { 0x1c, "ret" },
+
+    { 0x1e, "a" },
+    { 0x1f, "s" },
+    { 0x20, "d" },
+    { 0x21, "f" },
+    { 0x22, "g" },
+    { 0x23, "h" },
+    { 0x24, "j" },
+    { 0x25, "k" },
+    { 0x26, "l" },
+
+    { 0x2c, "z" },
+    { 0x2d, "x" },
+    { 0x2e, "c" },
+    { 0x2f, "v" },
+    { 0x30, "b" },
+    { 0x31, "n" },
+    { 0x32, "m" },
+    
+    { 0x39, "spc" },
+    { 0x3a, "caps_lock" },
+    { 0x3b, "f1" },
+    { 0x3c, "f2" },
+    { 0x3d, "f3" },
+    { 0x3e, "f4" },
+    { 0x3f, "f5" },
+    { 0x40, "f6" },
+    { 0x41, "f7" },
+    { 0x42, "f8" },
+    { 0x43, "f9" },
+    { 0x44, "f10" },
+    { 0x45, "num_lock" },
+    { 0x46, "scroll_lock" },
+
+    { 0x56, "<" },
+
+    { 0x57, "f11" },
+    { 0x58, "f12" },
+
+    { 0xb7, "print" },
+
+    { 0xc7, "home" },
+    { 0xc9, "pgup" },
+    { 0xd1, "pgdn" },
+    { 0xcf, "end" },
+
+    { 0xcb, "left" },
+    { 0xc8, "up" },
+    { 0xd0, "down" },
+    { 0xcd, "right" },
+
+    { 0xd2, "insert" },
+    { 0xd3, "delete" },
+    { 0, NULL },
+};
+
+static int get_keycode(const char *key)
+{
+    const KeyDef *p;
+
+    for(p = key_defs; p->name != NULL; p++) {
+        if (!strcmp(key, p->name))
+            return p->keycode;
+    }
+    return -1;
+}
+
+static void do_send_key(const char *string)
+{
+    char keybuf[16], *q;
+    uint8_t keycodes[16];
+    const char *p;
+    int nb_keycodes, keycode, i;
+    
+    nb_keycodes = 0;
+    p = string;
+    while (*p != '\0') {
+        q = keybuf;
+        while (*p != '\0' && *p != '-') {
+            if ((q - keybuf) < sizeof(keybuf) - 1) {
+                *q++ = *p;
+            }
+            p++;
+        }
+        *q = '\0';
+        keycode = get_keycode(keybuf);
+        if (keycode < 0) {
+            term_printf("unknown key: '%s'\n", keybuf);
+            return;
+        }
+        keycodes[nb_keycodes++] = keycode;
+        if (*p == '\0')
+            break;
+        p++;
+    }
+    /* key down events */
+    for(i = 0; i < nb_keycodes; i++) {
+        keycode = keycodes[i];
+        if (keycode & 0x80)
+            kbd_put_keycode(0xe0);
+        kbd_put_keycode(keycode & 0x7f);
+    }
+    /* key up events */
+    for(i = nb_keycodes - 1; i >= 0; i--) {
+        keycode = keycodes[i];
+        if (keycode & 0x80)
+            kbd_put_keycode(0xe0);
+        kbd_put_keycode(keycode | 0x80);
+    }
+}
+
+
 static int eject_device(BlockDriverState *bs, int force)
 {
     if (bdrv_is_inserted(bs)) {
@@ -331,6 +486,8 @@
       "item1[,...]", "activate logging of the specified items to 
'/tmp/qemu.log'" },
     { "q|quit", "", do_quit,
       "", "quit the emulator" },
+    { "sendkey", "s", do_send_key, 
+      "keys", "send keys to the VM (e.g. 'sendkey ctrl-alt-f1')" },
     { NULL, NULL, }, 
 };
 
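
A minimal standalone sketch (not patch code) of the scancode ordering the new
do_send_key above produces for a combo such as "sendkey ctrl-alt-f1": keys are
pressed in order and released in reverse, and extended keys (bit 7 set in the
key_defs table) are prefixed with 0xe0. kbd_put_keycode() is stubbed with printf,
and send_combo is a hypothetical helper.

    #include <stdio.h>
    #include <stdint.h>

    static void put_keycode(int code) { printf("scancode 0x%02x\n", code); }

    static void send_combo(const uint8_t *codes, int n)
    {
        int i;
        for (i = 0; i < n; i++) {           /* press in order */
            if (codes[i] & 0x80)
                put_keycode(0xe0);
            put_keycode(codes[i] & 0x7f);
        }
        for (i = n - 1; i >= 0; i--) {      /* release in reverse */
            if (codes[i] & 0x80)
                put_keycode(0xe0);
            put_keycode(codes[i] | 0x80);
        }
    }

    int main(void)
    {
        /* ctrl (0x1d), alt (0x38), f1 (0x3b) from the key_defs table above */
        const uint8_t combo[] = { 0x1d, 0x38, 0x3b };
        send_combo(combo, 3);
        return 0;
    }
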
diff -r 19af31a59537 -r f31494465fb0 tools/python/xen/xend/image.py
--- a/tools/python/xen/xend/image.py    Fri Oct 21 11:06:17 2005
+++ b/tools/python/xen/xend/image.py    Fri Oct 21 11:07:14 2005
@@ -311,7 +311,8 @@
                   "-m", "%s" % (self.vm.getMemoryTarget() / 1024)])
         args = args + self.dmargs
         env = dict(os.environ)
-        env['DISPLAY'] = self.display
+        if self.display:
+            env['DISPLAY'] = self.display
         log.info("spawning device models: %s %s", self.device_model, args)
         self.pid = os.spawnve(os.P_NOWAIT, self.device_model, args, env)
         log.info("device model pid: %d", self.pid)
diff -r 19af31a59537 -r f31494465fb0 tools/security/Makefile
--- a/tools/security/Makefile   Fri Oct 21 11:06:17 2005
+++ b/tools/security/Makefile   Fri Oct 21 11:07:14 2005
@@ -43,6 +43,7 @@
 build: mk-symlinks
        $(MAKE) secpol_tool
        $(MAKE) secpol_xml2bin
+       $(MAKE) get_decision
        chmod 700 ./setlabel.sh
        chmod 700 ./updategrub.sh
        chmod 700 ./getlabel.sh
diff -r 19af31a59537 -r f31494465fb0 tools/security/secpol_tool.c
--- a/tools/security/secpol_tool.c      Fri Oct 21 11:06:17 2005
+++ b/tools/security/secpol_tool.c      Fri Oct 21 11:07:14 2005
@@ -67,7 +67,7 @@
                         (unsigned long) hypercall);
 }
 
-static inline int do_acm_op(int xc_handle, acm_op_t * op)
+static inline int do_acm_op(int xc_handle, struct acm_op * op)
 {
     int ret = -1;
     privcmd_hypercall_t hypercall;
@@ -275,10 +275,10 @@
 /******************************* get policy ******************************/
 
 #define PULL_CACHE_SIZE                8192
-u8 pull_buffer[PULL_CACHE_SIZE];
+uint8_t pull_buffer[PULL_CACHE_SIZE];
 int acm_domain_getpolicy(int xc_handle)
 {
-    acm_op_t op;
+    struct acm_op op;
     int ret;
 
     memset(pull_buffer, 0x00, sizeof(pull_buffer));
@@ -299,7 +299,7 @@
     struct stat mystat;
     int ret, fd;
     off_t len;
-    u8 *buffer;
+    uint8_t *buffer;
 
     if ((ret = stat(filename, &mystat)))
     {
@@ -321,7 +321,7 @@
     }
     if (len == read(fd, buffer, len))
     {
-        acm_op_t op;
+        struct acm_op op;
         /* dump it and then push it down into xen/acm */
         acm_dump_policy_buffer(buffer, len);
         op.cmd = ACM_SETPOLICY;
@@ -368,8 +368,8 @@
 #define PULL_STATS_SIZE                8192
 int acm_domain_dumpstats(int xc_handle)
 {
-    u8 stats_buffer[PULL_STATS_SIZE];
-    acm_op_t op;
+    uint8_t stats_buffer[PULL_STATS_SIZE];
+    struct acm_op op;
     int ret;
     struct acm_stats_buffer *stats;
 
@@ -442,7 +442,7 @@
     /* this includes header and a set of types */
     #define MAX_SSIDBUFFER  2000
     int ret, i;
-    acm_op_t op;
+    struct acm_op op;
     struct acm_ssid_buffer *hdr;
     unsigned char *buf;
        int nice_print = 1;
diff -r 19af31a59537 -r f31494465fb0 xen/acm/acm_chinesewall_hooks.c
--- a/xen/acm/acm_chinesewall_hooks.c   Fri Oct 21 11:06:17 2005
+++ b/xen/acm/acm_chinesewall_hooks.c   Fri Oct 21 11:07:14 2005
@@ -26,7 +26,10 @@
  *    in which case all types of a new domain must be conflict-free
  *    with all types of already running domains.
  *
+ * indent -i4 -kr -nut
+ *
  */
+
 #include <xen/config.h>
 #include <xen/errno.h>
 #include <xen/types.h>
@@ -48,270 +51,333 @@
  */
 int acm_init_chwall_policy(void)
 {
-       /* minimal startup policy; policy write-locked already */
-       chwall_bin_pol.max_types = 1;
-       chwall_bin_pol.max_ssidrefs = 2;
-       chwall_bin_pol.max_conflictsets = 1;
-       chwall_bin_pol.ssidrefs = (domaintype_t *)xmalloc_array(domaintype_t, chwall_bin_pol.max_ssidrefs*chwall_bin_pol.max_types);
-       chwall_bin_pol.conflict_sets = (domaintype_t *)xmalloc_array(domaintype_t, chwall_bin_pol.max_conflictsets*chwall_bin_pol.max_types);
-       chwall_bin_pol.running_types = (domaintype_t *)xmalloc_array(domaintype_t, chwall_bin_pol.max_types);
-       chwall_bin_pol.conflict_aggregate_set = (domaintype_t *)xmalloc_array(domaintype_t, chwall_bin_pol.max_types);
-       
-       if ((chwall_bin_pol.conflict_sets == NULL) || (chwall_bin_pol.running_types == NULL) ||
-           (chwall_bin_pol.ssidrefs == NULL) || (chwall_bin_pol.conflict_aggregate_set == NULL))
-               return ACM_INIT_SSID_ERROR;
-
-       /* initialize state */
-       memset((void *)chwall_bin_pol.ssidrefs, 0, chwall_bin_pol.max_ssidrefs*chwall_bin_pol.max_types*sizeof(domaintype_t));
-       memset((void *)chwall_bin_pol.conflict_sets, 0, chwall_bin_pol.max_conflictsets*chwall_bin_pol.max_types*sizeof(domaintype_t));
-       memset((void *)chwall_bin_pol.running_types, 0, chwall_bin_pol.max_types*sizeof(domaintype_t));
-       memset((void *)chwall_bin_pol.conflict_aggregate_set, 0, chwall_bin_pol.max_types*sizeof(domaintype_t));
-       return ACM_OK;
-}
-
-static int
-chwall_init_domain_ssid(void **chwall_ssid, ssidref_t ssidref)
-{
-       struct chwall_ssid *chwall_ssidp = xmalloc(struct chwall_ssid);
-       traceprintk("%s.\n", __func__);
-       if (chwall_ssidp == NULL)
-               return ACM_INIT_SSID_ERROR;
-       /* 
-        * depending on wheter chwall is primary or secondary, get the respective
-        * part of the global ssidref (same way we'll get the partial ssid pointer)
-        */
-       chwall_ssidp->chwall_ssidref = GET_SSIDREF(ACM_CHINESE_WALL_POLICY, ssidref);
-       if ((chwall_ssidp->chwall_ssidref >= chwall_bin_pol.max_ssidrefs) ||
-           (chwall_ssidp->chwall_ssidref == ACM_DEFAULT_LOCAL_SSID)) {
-               printkd("%s: ERROR chwall_ssidref(%x) undefined (>max) or unset (0).\n",
-                       __func__, chwall_ssidp->chwall_ssidref);
-               xfree(chwall_ssidp);
-               return ACM_INIT_SSID_ERROR;
-       }
-       (*chwall_ssid) = chwall_ssidp;
-       printkd("%s: determined chwall_ssidref to %x.\n", 
-              __func__, chwall_ssidp->chwall_ssidref);
-       return ACM_OK;
-}
-
-static void
-chwall_free_domain_ssid(void *chwall_ssid)
-{
-       traceprintk("%s.\n", __func__);
-       if (chwall_ssid != NULL)
-               xfree(chwall_ssid);
-       return;
+    /* minimal startup policy; policy write-locked already */
+    chwall_bin_pol.max_types = 1;
+    chwall_bin_pol.max_ssidrefs = 2;
+    chwall_bin_pol.max_conflictsets = 1;
+    chwall_bin_pol.ssidrefs =
+        (domaintype_t *) xmalloc_array(domaintype_t,
+                                       chwall_bin_pol.max_ssidrefs *
+                                       chwall_bin_pol.max_types);
+    chwall_bin_pol.conflict_sets =
+        (domaintype_t *) xmalloc_array(domaintype_t,
+                                       chwall_bin_pol.max_conflictsets *
+                                       chwall_bin_pol.max_types);
+    chwall_bin_pol.running_types =
+        (domaintype_t *) xmalloc_array(domaintype_t,
+                                       chwall_bin_pol.max_types);
+    chwall_bin_pol.conflict_aggregate_set =
+        (domaintype_t *) xmalloc_array(domaintype_t,
+                                       chwall_bin_pol.max_types);
+
+    if ((chwall_bin_pol.conflict_sets == NULL)
+        || (chwall_bin_pol.running_types == NULL)
+        || (chwall_bin_pol.ssidrefs == NULL)
+        || (chwall_bin_pol.conflict_aggregate_set == NULL))
+        return ACM_INIT_SSID_ERROR;
+
+    /* initialize state */
+    memset((void *) chwall_bin_pol.ssidrefs, 0,
+           chwall_bin_pol.max_ssidrefs * chwall_bin_pol.max_types *
+           sizeof(domaintype_t));
+    memset((void *) chwall_bin_pol.conflict_sets, 0,
+           chwall_bin_pol.max_conflictsets * chwall_bin_pol.max_types *
+           sizeof(domaintype_t));
+    memset((void *) chwall_bin_pol.running_types, 0,
+           chwall_bin_pol.max_types * sizeof(domaintype_t));
+    memset((void *) chwall_bin_pol.conflict_aggregate_set, 0,
+           chwall_bin_pol.max_types * sizeof(domaintype_t));
+    return ACM_OK;
+}
+
+static int chwall_init_domain_ssid(void **chwall_ssid, ssidref_t ssidref)
+{
+    struct chwall_ssid *chwall_ssidp = xmalloc(struct chwall_ssid);
+    traceprintk("%s.\n", __func__);
+    if (chwall_ssidp == NULL)
+        return ACM_INIT_SSID_ERROR;
+
+    chwall_ssidp->chwall_ssidref =
+        GET_SSIDREF(ACM_CHINESE_WALL_POLICY, ssidref);
+
+    if ((chwall_ssidp->chwall_ssidref >= chwall_bin_pol.max_ssidrefs)
+        || (chwall_ssidp->chwall_ssidref == ACM_DEFAULT_LOCAL_SSID))
+    {
+        printkd("%s: ERROR chwall_ssidref(%x) undefined (>max) or unset (0).\n",
+                __func__, chwall_ssidp->chwall_ssidref);
+        xfree(chwall_ssidp);
+        return ACM_INIT_SSID_ERROR;
+    }
+    (*chwall_ssid) = chwall_ssidp;
+    printkd("%s: determined chwall_ssidref to %x.\n",
+            __func__, chwall_ssidp->chwall_ssidref);
+    return ACM_OK;
+}
+
+static void chwall_free_domain_ssid(void *chwall_ssid)
+{
+    traceprintk("%s.\n", __func__);
+    if (chwall_ssid != NULL)
+        xfree(chwall_ssid);
+    return;
 }
 
 
 /* dump chinese wall cache; policy read-locked already */
-static int
-chwall_dump_policy(u8 *buf, u16 buf_size) {    
-     struct acm_chwall_policy_buffer *chwall_buf = (struct acm_chwall_policy_buffer *)buf;
-     int ret = 0;
-
-     chwall_buf->chwall_max_types = htonl(chwall_bin_pol.max_types);
-     chwall_buf->chwall_max_ssidrefs = htonl(chwall_bin_pol.max_ssidrefs);
-     chwall_buf->policy_code = htonl(ACM_CHINESE_WALL_POLICY);
-     chwall_buf->chwall_ssid_offset = htonl(sizeof(struct acm_chwall_policy_buffer));
-     chwall_buf->chwall_max_conflictsets = htonl(chwall_bin_pol.max_conflictsets);
-     chwall_buf->chwall_conflict_sets_offset =
-            htonl(
-                  ntohl(chwall_buf->chwall_ssid_offset) +
-                  sizeof(domaintype_t) * chwall_bin_pol.max_ssidrefs * 
-                  chwall_bin_pol.max_types);
-
-     chwall_buf->chwall_running_types_offset = 
-            htonl(
-                  ntohl(chwall_buf->chwall_conflict_sets_offset) +
-                  sizeof(domaintype_t) * chwall_bin_pol.max_conflictsets *
-                  chwall_bin_pol.max_types);
-
-     chwall_buf->chwall_conflict_aggregate_offset =
-            htonl(
-                  ntohl(chwall_buf->chwall_running_types_offset) +
-                  sizeof(domaintype_t) * chwall_bin_pol.max_types);
-
-     ret = ntohl(chwall_buf->chwall_conflict_aggregate_offset) +
-            sizeof(domaintype_t) * chwall_bin_pol.max_types;
-
-     /* now copy buffers over */
-     arrcpy16((u16 *)(buf + ntohl(chwall_buf->chwall_ssid_offset)),
-             chwall_bin_pol.ssidrefs,
-             chwall_bin_pol.max_ssidrefs * chwall_bin_pol.max_types);
-
-     arrcpy16((u16 *)(buf + ntohl(chwall_buf->chwall_conflict_sets_offset)),
-             chwall_bin_pol.conflict_sets,
-             chwall_bin_pol.max_conflictsets * chwall_bin_pol.max_types);
-
-     arrcpy16((u16 *)(buf + ntohl(chwall_buf->chwall_running_types_offset)),
-             chwall_bin_pol.running_types,
-             chwall_bin_pol.max_types);
-
-     arrcpy16((u16 *)(buf + ntohl(chwall_buf->chwall_conflict_aggregate_offset)),
-             chwall_bin_pol.conflict_aggregate_set,
-             chwall_bin_pol.max_types);
-     return ret;
+static int chwall_dump_policy(u8 * buf, u32 buf_size)
+{
+    struct acm_chwall_policy_buffer *chwall_buf =
+        (struct acm_chwall_policy_buffer *) buf;
+    int ret = 0;
+
+    if (buf_size < sizeof(struct acm_chwall_policy_buffer))
+        return -EINVAL;
+
+    chwall_buf->chwall_max_types = htonl(chwall_bin_pol.max_types);
+    chwall_buf->chwall_max_ssidrefs = htonl(chwall_bin_pol.max_ssidrefs);
+    chwall_buf->policy_code = htonl(ACM_CHINESE_WALL_POLICY);
+    chwall_buf->chwall_ssid_offset =
+        htonl(sizeof(struct acm_chwall_policy_buffer));
+    chwall_buf->chwall_max_conflictsets =
+        htonl(chwall_bin_pol.max_conflictsets);
+    chwall_buf->chwall_conflict_sets_offset =
+        htonl(ntohl(chwall_buf->chwall_ssid_offset) +
+              sizeof(domaintype_t) * chwall_bin_pol.max_ssidrefs *
+              chwall_bin_pol.max_types);
+    chwall_buf->chwall_running_types_offset =
+        htonl(ntohl(chwall_buf->chwall_conflict_sets_offset) +
+              sizeof(domaintype_t) * chwall_bin_pol.max_conflictsets *
+              chwall_bin_pol.max_types);
+    chwall_buf->chwall_conflict_aggregate_offset =
+        htonl(ntohl(chwall_buf->chwall_running_types_offset) +
+              sizeof(domaintype_t) * chwall_bin_pol.max_types);
+
+    ret = ntohl(chwall_buf->chwall_conflict_aggregate_offset) +
+        sizeof(domaintype_t) * chwall_bin_pol.max_types;
+
+    if (buf_size < ret)
+        return -EINVAL;
+
+    /* now copy buffers over */
+    arrcpy16((u16 *) (buf + ntohl(chwall_buf->chwall_ssid_offset)),
+             chwall_bin_pol.ssidrefs,
+             chwall_bin_pol.max_ssidrefs * chwall_bin_pol.max_types);
+
+    arrcpy16((u16 *) (buf +
+                      ntohl(chwall_buf->chwall_conflict_sets_offset)),
+             chwall_bin_pol.conflict_sets,
+             chwall_bin_pol.max_conflictsets * chwall_bin_pol.max_types);
+
+    arrcpy16((u16 *) (buf +
+                      ntohl(chwall_buf->chwall_running_types_offset)),
+             chwall_bin_pol.running_types, chwall_bin_pol.max_types);
+
+    arrcpy16((u16 *) (buf +
+                      ntohl(chwall_buf->chwall_conflict_aggregate_offset)),
+             chwall_bin_pol.conflict_aggregate_set,
+             chwall_bin_pol.max_types);
+    return ret;
 }
 
 /* adapt security state (running_types and conflict_aggregate_set) to all running
  * domains; chwall_init_state is called when a policy is changed to bring the security
  * information into a consistent state and to detect violations (return != 0).
  * from a security point of view, we simulate that all running domains are re-started
- */ 
+ */
 static int
-chwall_init_state(struct acm_chwall_policy_buffer *chwall_buf, domaintype_t *ssidrefs, domaintype_t *conflict_sets,
-                 domaintype_t *running_types, domaintype_t *conflict_aggregate_set)
-{
-       int violation = 0, i, j;
-       struct chwall_ssid *chwall_ssid;
-       ssidref_t chwall_ssidref;
-       struct domain **pd;
-
-        write_lock(&domlist_lock);
-       /* go through all domains and adjust policy as if this domain was started now */
-        pd = &domain_list;
-        for ( pd = &domain_list; *pd != NULL; pd = &(*pd)->next_in_list ) {
-               chwall_ssid = GET_SSIDP(ACM_CHINESE_WALL_POLICY, (struct acm_ssid_domain *)(*pd)->ssid);
-               chwall_ssidref = chwall_ssid->chwall_ssidref;
-               traceprintk("%s: validating policy for domain %x (chwall-REF=%x).\n", 
-                       __func__, (*pd)->domain_id, chwall_ssidref);
-               /* a) adjust types ref-count for running domains */
-               for (i=0; i< chwall_buf->chwall_max_types; i++)
-                       running_types[i] +=
-                               ssidrefs[chwall_ssidref*chwall_buf->chwall_max_types + i];
-
-               /* b) check for conflict */
-               for (i=0; i< chwall_buf->chwall_max_types; i++)
-                       if (conflict_aggregate_set[i] && 
-                           ssidrefs[chwall_ssidref*chwall_buf->chwall_max_types + i]) {
-                               printk("%s: CHINESE WALL CONFLICT in type %02x.\n", __func__, i);
-                               violation = 1;
-                               goto out;
-                       }
-               /* set violation and break out of the loop */
-               /* c) adapt conflict aggregate set for this domain (notice conflicts) */
-               for (i=0; i<chwall_buf->chwall_max_conflictsets; i++) {
-                       int common = 0;
-                       /* check if conflict_set_i and ssidref have common types */
-                       for (j=0; j<chwall_buf->chwall_max_types; j++)
-                               if (conflict_sets[i*chwall_buf->chwall_max_types + j] &&
-                                   ssidrefs[chwall_ssidref*chwall_buf->chwall_max_types + j]) {
-                                       common = 1;
-                                       break;
-                               }
-                       if (common == 0)
-                               continue; /* try next conflict set */
-                       /* now add types of the conflict set to conflict_aggregate_set (except types in chwall_ssidref) */
-                       for (j=0; j<chwall_buf->chwall_max_types; j++)
-                               if (conflict_sets[i*chwall_buf->chwall_max_types + j] &&
-                                   !ssidrefs[chwall_ssidref*chwall_buf->chwall_max_types + j])
-                                       conflict_aggregate_set[j]++;
-               }       
-       }
+chwall_init_state(struct acm_chwall_policy_buffer *chwall_buf,
+                  domaintype_t * ssidrefs, domaintype_t * conflict_sets,
+                  domaintype_t * running_types,
+                  domaintype_t * conflict_aggregate_set)
+{
+    int violation = 0, i, j;
+    struct chwall_ssid *chwall_ssid;
+    ssidref_t chwall_ssidref;
+    struct domain **pd;
+
+    write_lock(&domlist_lock);
+    /* go through all domains and adjust policy as if this domain was started now */
+    pd = &domain_list;
+    for (pd = &domain_list; *pd != NULL; pd = &(*pd)->next_in_list)
+    {
+        chwall_ssid =
+            GET_SSIDP(ACM_CHINESE_WALL_POLICY,
+                      (struct acm_ssid_domain *) (*pd)->ssid);
+        chwall_ssidref = chwall_ssid->chwall_ssidref;
+        traceprintk("%s: validating policy for domain %x (chwall-REF=%x).\n",
+                    __func__, (*pd)->domain_id, chwall_ssidref);
+        /* a) adjust types ref-count for running domains */
+        for (i = 0; i < chwall_buf->chwall_max_types; i++)
+            running_types[i] +=
+                ssidrefs[chwall_ssidref * chwall_buf->chwall_max_types + i];
+
+        /* b) check for conflict */
+        for (i = 0; i < chwall_buf->chwall_max_types; i++)
+            if (conflict_aggregate_set[i] &&
+                ssidrefs[chwall_ssidref * chwall_buf->chwall_max_types + i])
+            {
+                printk("%s: CHINESE WALL CONFLICT in type %02x.\n",
+                       __func__, i);
+                violation = 1;
+                goto out;
+            }
+        /* set violation and break out of the loop */
+        /* c) adapt conflict aggregate set for this domain (notice conflicts) */
+        for (i = 0; i < chwall_buf->chwall_max_conflictsets; i++)
+        {
+            int common = 0;
+            /* check if conflict_set_i and ssidref have common types */
+            for (j = 0; j < chwall_buf->chwall_max_types; j++)
+                if (conflict_sets[i * chwall_buf->chwall_max_types + j] &&
+                    ssidrefs[chwall_ssidref *
+                            chwall_buf->chwall_max_types + j])
+                {
+                    common = 1;
+                    break;
+                }
+            if (common == 0)
+                continue;       /* try next conflict set */
+            /* now add types of the conflict set to conflict_aggregate_set (except types in chwall_ssidref) */
+            for (j = 0; j < chwall_buf->chwall_max_types; j++)
+                if (conflict_sets[i * chwall_buf->chwall_max_types + j] &&
+                    !ssidrefs[chwall_ssidref *
+                             chwall_buf->chwall_max_types + j])
+                    conflict_aggregate_set[j]++;
+        }
+    }
  out:
-        write_unlock(&domlist_lock);
-       return violation;
-       /* returning "violation != 0" means that the currently running set of domains would 
-        * not be possible if the new policy had been enforced before starting them; for chinese
-        * wall, this means that the new policy includes at least one conflict set of which 
-        * more than one type is currently running */
-}
-
-static int
-chwall_set_policy(u8 *buf, u16 buf_size) 
-{      
-       /* policy write-locked already */
-       struct acm_chwall_policy_buffer *chwall_buf = (struct acm_chwall_policy_buffer *)buf;
-       void *ssids = NULL, *conflict_sets = NULL, *running_types = NULL, *conflict_aggregate_set = NULL;       
-
-        /* rewrite the policy due to endianess */
-        chwall_buf->policy_code                      = ntohl(chwall_buf->policy_code);
-        chwall_buf->policy_version                   = ntohl(chwall_buf->policy_version);
-        chwall_buf->chwall_max_types                 = ntohl(chwall_buf->chwall_max_types);
-        chwall_buf->chwall_max_ssidrefs              = ntohl(chwall_buf->chwall_max_ssidrefs);
-        chwall_buf->chwall_max_conflictsets          = ntohl(chwall_buf->chwall_max_conflictsets);
-        chwall_buf->chwall_ssid_offset               = ntohl(chwall_buf->chwall_ssid_offset);
-        chwall_buf->chwall_conflict_sets_offset      = ntohl(chwall_buf->chwall_conflict_sets_offset);
-        chwall_buf->chwall_running_types_offset      = ntohl(chwall_buf->chwall_running_types_offset);
-        chwall_buf->chwall_conflict_aggregate_offset = ntohl(chwall_buf->chwall_conflict_aggregate_offset);
-
-       /* policy type and version checks */
-       if ((chwall_buf->policy_code != ACM_CHINESE_WALL_POLICY) ||
-           (chwall_buf->policy_version != ACM_CHWALL_VERSION))
-               return -EINVAL;
-
-       /* 1. allocate new buffers */
-       ssids = xmalloc_array(domaintype_t, chwall_buf->chwall_max_types*chwall_buf->chwall_max_ssidrefs);
-       conflict_sets = xmalloc_array(domaintype_t, chwall_buf->chwall_max_conflictsets*chwall_buf->chwall_max_types);
-       running_types = xmalloc_array(domaintype_t,chwall_buf->chwall_max_types);
-       conflict_aggregate_set = xmalloc_array(domaintype_t, chwall_buf->chwall_max_types);
-
-       if ((ssids == NULL)||(conflict_sets == NULL)||(running_types == NULL)||(conflict_aggregate_set == NULL))
-               goto error_free;
-
-       /* 2. set new policy */
-       if (chwall_buf->chwall_ssid_offset + sizeof(domaintype_t) * chwall_buf->chwall_max_types * chwall_buf->chwall_max_ssidrefs > buf_size)
-               goto error_free;
-       arrcpy(ssids, buf + chwall_buf->chwall_ssid_offset,
-              sizeof(domaintype_t),  
-              chwall_buf->chwall_max_types * chwall_buf->chwall_max_ssidrefs);
-
-       if (chwall_buf->chwall_conflict_sets_offset + sizeof(domaintype_t) * chwall_buf->chwall_max_types * chwall_buf->chwall_max_conflictsets > buf_size)
-               goto error_free;
-
-       arrcpy(conflict_sets, buf + chwall_buf->chwall_conflict_sets_offset,
-              sizeof(domaintype_t),
-              chwall_buf->chwall_max_types * chwall_buf->chwall_max_conflictsets);
-
-       /* we also use new state buffers since max_types can change */
-       memset(running_types, 0, sizeof(domaintype_t)*chwall_buf->chwall_max_types);
-       memset(conflict_aggregate_set, 0, sizeof(domaintype_t)*chwall_buf->chwall_max_types);
-
-       /* 3. now re-calculate the state for the new policy based on running domains; 
-        *    this can fail if new policy is conflicting with running domains */
-       if (chwall_init_state(chwall_buf, ssids, conflict_sets, running_types, conflict_aggregate_set)) {
-               printk("%s: New policy conflicts with running domains. Policy load aborted.\n", __func__);
-               goto error_free; /* new policy conflicts with running domains */
-       }
-       /* 4. free old policy buffers, replace with new ones */
-       chwall_bin_pol.max_types = chwall_buf->chwall_max_types;
-       chwall_bin_pol.max_ssidrefs = chwall_buf->chwall_max_ssidrefs;
-       chwall_bin_pol.max_conflictsets = chwall_buf->chwall_max_conflictsets;
-       if (chwall_bin_pol.ssidrefs != NULL) 
-               xfree(chwall_bin_pol.ssidrefs);
-       if (chwall_bin_pol.conflict_aggregate_set != NULL) 
-               xfree(chwall_bin_pol.conflict_aggregate_set);
-       if (chwall_bin_pol.running_types != NULL) 
-               xfree(chwall_bin_pol.running_types);
-       if (chwall_bin_pol.conflict_sets != NULL) 
-               xfree(chwall_bin_pol.conflict_sets);
-       chwall_bin_pol.ssidrefs = ssids;
-       chwall_bin_pol.conflict_aggregate_set = conflict_aggregate_set;
-       chwall_bin_pol.running_types = running_types;
-       chwall_bin_pol.conflict_sets = conflict_sets;
-       return ACM_OK;
-
-error_free:
-       printk("%s: ERROR setting policy.\n", __func__);
-       if (ssids != NULL) xfree(ssids);
-       if (conflict_sets != NULL) xfree(conflict_sets);
-       if (running_types != NULL) xfree(running_types);
-       if (conflict_aggregate_set != NULL) xfree(conflict_aggregate_set);
-       return -EFAULT;
-}
-       
-static int 
-chwall_dump_stats(u8 *buf, u16 len)
-{
-       /* no stats for Chinese Wall Policy */
-       return 0;
-}
-
-static int
-chwall_dump_ssid_types(ssidref_t ssidref, u8 *buf, u16 len)
+    write_unlock(&domlist_lock);
+    return violation;
+    /* returning "violation != 0" means that the currently running set of domains would
+     * not be possible if the new policy had been enforced before starting them; for chinese
+     * wall, this means that the new policy includes at least one conflict set of which
+     * more than one type is currently running */
+}
+
+static int chwall_set_policy(u8 * buf, u32 buf_size)
+{
+    /* policy write-locked already */
+    struct acm_chwall_policy_buffer *chwall_buf =
+        (struct acm_chwall_policy_buffer *) buf;
+    void *ssids = NULL, *conflict_sets = NULL, *running_types =
+        NULL, *conflict_aggregate_set = NULL;
+
+    if (buf_size < sizeof(struct acm_chwall_policy_buffer))
+        return -EINVAL;
+
+    /* rewrite the policy due to endianess */
+    chwall_buf->policy_code = ntohl(chwall_buf->policy_code);
+    chwall_buf->policy_version = ntohl(chwall_buf->policy_version);
+    chwall_buf->chwall_max_types = ntohl(chwall_buf->chwall_max_types);
+    chwall_buf->chwall_max_ssidrefs =
+        ntohl(chwall_buf->chwall_max_ssidrefs);
+    chwall_buf->chwall_max_conflictsets =
+        ntohl(chwall_buf->chwall_max_conflictsets);
+    chwall_buf->chwall_ssid_offset = ntohl(chwall_buf->chwall_ssid_offset);
+    chwall_buf->chwall_conflict_sets_offset =
+        ntohl(chwall_buf->chwall_conflict_sets_offset);
+    chwall_buf->chwall_running_types_offset =
+        ntohl(chwall_buf->chwall_running_types_offset);
+    chwall_buf->chwall_conflict_aggregate_offset =
+        ntohl(chwall_buf->chwall_conflict_aggregate_offset);
+
+    /* policy type and version checks */
+    if ((chwall_buf->policy_code != ACM_CHINESE_WALL_POLICY) ||
+        (chwall_buf->policy_version != ACM_CHWALL_VERSION))
+        return -EINVAL;
+
+    /* 1. allocate new buffers */
+    ssids =
+        xmalloc_array(domaintype_t,
+                      chwall_buf->chwall_max_types *
+                      chwall_buf->chwall_max_ssidrefs);
+    conflict_sets =
+        xmalloc_array(domaintype_t,
+                      chwall_buf->chwall_max_conflictsets *
+                      chwall_buf->chwall_max_types);
+    running_types =
+        xmalloc_array(domaintype_t, chwall_buf->chwall_max_types);
+    conflict_aggregate_set =
+        xmalloc_array(domaintype_t, chwall_buf->chwall_max_types);
+
+    if ((ssids == NULL) || (conflict_sets == NULL)
+        || (running_types == NULL) || (conflict_aggregate_set == NULL))
+        goto error_free;
+
+    /* 2. set new policy */
+    if (chwall_buf->chwall_ssid_offset + sizeof(domaintype_t) *
+        chwall_buf->chwall_max_types * chwall_buf->chwall_max_ssidrefs >
+        buf_size)
+        goto error_free;
+
+    arrcpy(ssids, buf + chwall_buf->chwall_ssid_offset,
+           sizeof(domaintype_t),
+           chwall_buf->chwall_max_types * chwall_buf->chwall_max_ssidrefs);
+
+    if (chwall_buf->chwall_conflict_sets_offset + sizeof(domaintype_t) *
+        chwall_buf->chwall_max_types *
+        chwall_buf->chwall_max_conflictsets > buf_size)
+        goto error_free;
+
+    arrcpy(conflict_sets, buf + chwall_buf->chwall_conflict_sets_offset,
+           sizeof(domaintype_t),
+           chwall_buf->chwall_max_types *
+           chwall_buf->chwall_max_conflictsets);
+
+    /* we also use new state buffers since max_types can change */
+    memset(running_types, 0,
+           sizeof(domaintype_t) * chwall_buf->chwall_max_types);
+    memset(conflict_aggregate_set, 0,
+           sizeof(domaintype_t) * chwall_buf->chwall_max_types);
+
+    /* 3. now re-calculate the state for the new policy based on running domains;
+     *    this can fail if new policy is conflicting with running domains */
+    if (chwall_init_state(chwall_buf, ssids,
+                          conflict_sets, running_types,
+                          conflict_aggregate_set))
+    {
+        printk("%s: New policy conflicts with running domains. Policy load 
aborted.\n",
+               __func__);
+        goto error_free;        /* new policy conflicts with running domains */
+    }
+    /* 4. free old policy buffers, replace with new ones */
+    chwall_bin_pol.max_types = chwall_buf->chwall_max_types;
+    chwall_bin_pol.max_ssidrefs = chwall_buf->chwall_max_ssidrefs;
+    chwall_bin_pol.max_conflictsets = chwall_buf->chwall_max_conflictsets;
+    if (chwall_bin_pol.ssidrefs != NULL)
+        xfree(chwall_bin_pol.ssidrefs);
+    if (chwall_bin_pol.conflict_aggregate_set != NULL)
+        xfree(chwall_bin_pol.conflict_aggregate_set);
+    if (chwall_bin_pol.running_types != NULL)
+        xfree(chwall_bin_pol.running_types);
+    if (chwall_bin_pol.conflict_sets != NULL)
+        xfree(chwall_bin_pol.conflict_sets);
+    chwall_bin_pol.ssidrefs = ssids;
+    chwall_bin_pol.conflict_aggregate_set = conflict_aggregate_set;
+    chwall_bin_pol.running_types = running_types;
+    chwall_bin_pol.conflict_sets = conflict_sets;
+    return ACM_OK;
+
+ error_free:
+    printk("%s: ERROR setting policy.\n", __func__);
+    if (ssids != NULL)
+        xfree(ssids);
+    if (conflict_sets != NULL)
+        xfree(conflict_sets);
+    if (running_types != NULL)
+        xfree(running_types);
+    if (conflict_aggregate_set != NULL)
+        xfree(conflict_aggregate_set);
+    return -EFAULT;
+}
+
+static int chwall_dump_stats(u8 * buf, u16 len)
+{
+    /* no stats for Chinese Wall Policy */
+    return 0;
+}
+
+static int chwall_dump_ssid_types(ssidref_t ssidref, u8 * buf, u16 len)
 {
     int i;
 
@@ -319,12 +385,14 @@
     if (chwall_bin_pol.max_types > len)
         return -EFAULT;
 
-       if (ssidref >= chwall_bin_pol.max_ssidrefs)
-               return -EFAULT;
+    if (ssidref >= chwall_bin_pol.max_ssidrefs)
+        return -EFAULT;
 
     /* read types for chwall ssidref */
-    for(i=0; i< chwall_bin_pol.max_types; i++) {
-        if (chwall_bin_pol.ssidrefs[ssidref * chwall_bin_pol.max_types + i])
+    for (i = 0; i < chwall_bin_pol.max_types; i++)
+    {
+        if (chwall_bin_pol.
+            ssidrefs[ssidref * chwall_bin_pol.max_types + i])
             buf[i] = 1;
         else
             buf[i] = 0;
@@ -336,198 +404,239 @@
  * Authorization functions
  ***************************/
 
-
 /* -------- DOMAIN OPERATION HOOKS -----------*/
 
-static int 
-chwall_pre_domain_create(void *subject_ssid, ssidref_t ssidref)
-{
-       ssidref_t chwall_ssidref;
-       int i,j;
-       traceprintk("%s.\n", __func__);
-
-       read_lock(&acm_bin_pol_rwlock);
-       chwall_ssidref = GET_SSIDREF(ACM_CHINESE_WALL_POLICY, ssidref);
-       if (chwall_ssidref == ACM_DEFAULT_LOCAL_SSID) {
-               printk("%s: ERROR CHWALL SSID is NOT SET but policy 
enforced.\n", __func__);
-               read_unlock(&acm_bin_pol_rwlock);
-               return ACM_ACCESS_DENIED; /* catching and indicating config 
error */
-       }
-       if (chwall_ssidref >= chwall_bin_pol.max_ssidrefs) {
-               printk("%s: ERROR chwall_ssidref > max(%x).\n",
-                      __func__, chwall_bin_pol.max_ssidrefs-1);
-               read_unlock(&acm_bin_pol_rwlock);
-               return ACM_ACCESS_DENIED;
-       }
-       /* A: chinese wall check for conflicts */
-       for (i=0; i< chwall_bin_pol.max_types; i++)
-               if (chwall_bin_pol.conflict_aggregate_set[i] && 
-                   chwall_bin_pol.ssidrefs[chwall_ssidref*chwall_bin_pol.max_types + i]) {
-                       printk("%s: CHINESE WALL CONFLICT in type %02x.\n", __func__, i);
-                       read_unlock(&acm_bin_pol_rwlock);
-                       return ACM_ACCESS_DENIED;
-               }
-
-       /* B: chinese wall conflict set adjustment (so that other 
-        *      other domains simultaneously created are evaluated against this new set)*/
-       for (i=0; i<chwall_bin_pol.max_conflictsets; i++) {
-               int common = 0;
-               /* check if conflict_set_i and ssidref have common types */
-               for (j=0; j<chwall_bin_pol.max_types; j++)
-                       if (chwall_bin_pol.conflict_sets[i*chwall_bin_pol.max_types + j] &&
-                           chwall_bin_pol.ssidrefs[chwall_ssidref*chwall_bin_pol.max_types + j]) {
-                               common = 1;
-                               break;
-                       }
-               if (common == 0)
-                       continue; /* try next conflict set */
-               /* now add types of the conflict set to conflict_aggregate_set (except types in chwall_ssidref) */
-               for (j=0; j<chwall_bin_pol.max_types; j++)
-                       if (chwall_bin_pol.conflict_sets[i*chwall_bin_pol.max_types + j] &&
-                           !chwall_bin_pol.ssidrefs[chwall_ssidref*chwall_bin_pol.max_types + j])
-                               chwall_bin_pol.conflict_aggregate_set[j]++;
-       }
-       read_unlock(&acm_bin_pol_rwlock);
-       return ACM_ACCESS_PERMITTED;
-}
-
-static void
-chwall_post_domain_create(domid_t domid, ssidref_t ssidref)
-{
-       int i,j;
-       ssidref_t chwall_ssidref;
-       traceprintk("%s.\n", __func__);
-       
-       read_lock(&acm_bin_pol_rwlock);
-       chwall_ssidref = GET_SSIDREF(ACM_CHINESE_WALL_POLICY, ssidref);
-       /* adjust types ref-count for running domains */
-       for (i=0; i< chwall_bin_pol.max_types; i++)
-               chwall_bin_pol.running_types[i] +=
-                       chwall_bin_pol.ssidrefs[chwall_ssidref*chwall_bin_pol.max_types + i];
-       if (domid) {
-               read_unlock(&acm_bin_pol_rwlock);
-               return;
-       }
-       /* Xen does not call pre-create hook for DOM0;
-        * to consider type conflicts of any domain with DOM0, we need
-        * to adjust the conflict_aggregate for DOM0 here the same way it
-        * is done for non-DOM0 domains in the pre-hook */
-       printkd("%s: adjusting security state for DOM0 (ssidref=%x, 
chwall_ssidref=%x).\n", 
-               __func__, ssidref, chwall_ssidref);
-
-       /* chinese wall conflict set adjustment (so that other 
-        *      other domains simultaneously created are evaluated against this new set)*/
-       for (i=0; i<chwall_bin_pol.max_conflictsets; i++) {
-               int common = 0;
-               /* check if conflict_set_i and ssidref have common types */
-               for (j=0; j<chwall_bin_pol.max_types; j++)
-                       if (chwall_bin_pol.conflict_sets[i*chwall_bin_pol.max_types + j] &&
-                           chwall_bin_pol.ssidrefs[chwall_ssidref*chwall_bin_pol.max_types + j]) {
-                               common = 1;
-                               break;
-                       }
-               if (common == 0)
-                       continue; /* try next conflict set */
-               /* now add types of the conflict set to conflict_aggregate_set (except types in chwall_ssidref) */
-               for (j=0; j<chwall_bin_pol.max_types; j++)
-                       if (chwall_bin_pol.conflict_sets[i*chwall_bin_pol.max_types + j] &&
-                           !chwall_bin_pol.ssidrefs[chwall_ssidref*chwall_bin_pol.max_types + j])
-                               chwall_bin_pol.conflict_aggregate_set[j]++;
-       }
-       read_unlock(&acm_bin_pol_rwlock);
-       return;
+static int chwall_pre_domain_create(void *subject_ssid, ssidref_t ssidref)
+{
+    ssidref_t chwall_ssidref;
+    int i, j;
+    traceprintk("%s.\n", __func__);
+
+    read_lock(&acm_bin_pol_rwlock);
+    chwall_ssidref = GET_SSIDREF(ACM_CHINESE_WALL_POLICY, ssidref);
+    if (chwall_ssidref == ACM_DEFAULT_LOCAL_SSID)
+    {
+        printk("%s: ERROR CHWALL SSID is NOT SET but policy enforced.\n",
+               __func__);
+        read_unlock(&acm_bin_pol_rwlock);
+        return ACM_ACCESS_DENIED;       /* catching and indicating config error */
+    }
+    if (chwall_ssidref >= chwall_bin_pol.max_ssidrefs)
+    {
+        printk("%s: ERROR chwall_ssidref > max(%x).\n",
+               __func__, chwall_bin_pol.max_ssidrefs - 1);
+        read_unlock(&acm_bin_pol_rwlock);
+        return ACM_ACCESS_DENIED;
+    }
+    /* A: chinese wall check for conflicts */
+    for (i = 0; i < chwall_bin_pol.max_types; i++)
+        if (chwall_bin_pol.conflict_aggregate_set[i] &&
+            chwall_bin_pol.ssidrefs[chwall_ssidref *
+                                   chwall_bin_pol.max_types + i])
+        {
+            printk("%s: CHINESE WALL CONFLICT in type %02x.\n", __func__, i);
+            read_unlock(&acm_bin_pol_rwlock);
+            return ACM_ACCESS_DENIED;
+        }
+
+    /* B: chinese wall conflict set adjustment (so that other
+     *      other domains simultaneously created are evaluated against this new set)*/
+    for (i = 0; i < chwall_bin_pol.max_conflictsets; i++)
+    {
+        int common = 0;
+        /* check if conflict_set_i and ssidref have common types */
+        for (j = 0; j < chwall_bin_pol.max_types; j++)
+            if (chwall_bin_pol.
+                conflict_sets[i * chwall_bin_pol.max_types + j]
+                && chwall_bin_pol.ssidrefs[chwall_ssidref *
+                                          chwall_bin_pol.max_types + j])
+            {
+                common = 1;
+                break;
+            }
+        if (common == 0)
+            continue;           /* try next conflict set */
+        /* now add types of the conflict set to conflict_aggregate_set (except types in chwall_ssidref) */
+        for (j = 0; j < chwall_bin_pol.max_types; j++)
+            if (chwall_bin_pol.
+                conflict_sets[i * chwall_bin_pol.max_types + j]
+                && !chwall_bin_pol.ssidrefs[chwall_ssidref *
+                                           chwall_bin_pol.max_types + j])
+                chwall_bin_pol.conflict_aggregate_set[j]++;
+    }
+    read_unlock(&acm_bin_pol_rwlock);
+    return ACM_ACCESS_PERMITTED;
+}
+
+static void chwall_post_domain_create(domid_t domid, ssidref_t ssidref)
+{
+    int i, j;
+    ssidref_t chwall_ssidref;
+    traceprintk("%s.\n", __func__);
+
+    read_lock(&acm_bin_pol_rwlock);
+    chwall_ssidref = GET_SSIDREF(ACM_CHINESE_WALL_POLICY, ssidref);
+    /* adjust types ref-count for running domains */
+    for (i = 0; i < chwall_bin_pol.max_types; i++)
+        chwall_bin_pol.running_types[i] +=
+            chwall_bin_pol.ssidrefs[chwall_ssidref *
+                                   chwall_bin_pol.max_types + i];
+    if (domid)
+    {
+        read_unlock(&acm_bin_pol_rwlock);
+        return;
+    }
+    /* Xen does not call pre-create hook for DOM0;
+     * to consider type conflicts of any domain with DOM0, we need
+     * to adjust the conflict_aggregate for DOM0 here the same way it
+     * is done for non-DOM0 domains in the pre-hook */
+    printkd("%s: adjusting security state for DOM0 (ssidref=%x, 
chwall_ssidref=%x).\n",
+            __func__, ssidref, chwall_ssidref);
+
+    /* chinese wall conflict set adjustment (so that other
+     *      other domains simultaneously created are evaluated against this new set)*/
+    for (i = 0; i < chwall_bin_pol.max_conflictsets; i++)
+    {
+        int common = 0;
+        /* check if conflict_set_i and ssidref have common types */
+        for (j = 0; j < chwall_bin_pol.max_types; j++)
+            if (chwall_bin_pol.
+                conflict_sets[i * chwall_bin_pol.max_types + j]
+                && chwall_bin_pol.ssidrefs[chwall_ssidref *
+                                          chwall_bin_pol.max_types + j])
+            {
+                common = 1;
+                break;
+            }
+        if (common == 0)
+            continue;           /* try next conflict set */
+        /* now add types of the conflict set to conflict_aggregate_set (except types in chwall_ssidref) */
+        for (j = 0; j < chwall_bin_pol.max_types; j++)
+            if (chwall_bin_pol.
+                conflict_sets[i * chwall_bin_pol.max_types + j]
+                && !chwall_bin_pol.ssidrefs[chwall_ssidref *
+                                           chwall_bin_pol.max_types + j])
+                chwall_bin_pol.conflict_aggregate_set[j]++;
+    }
+    read_unlock(&acm_bin_pol_rwlock);
+    return;
 }
 
 static void
 chwall_fail_domain_create(void *subject_ssid, ssidref_t ssidref)
 {
-       int i, j;
-       ssidref_t chwall_ssidref;
-       traceprintk("%s.\n", __func__);
-
-       read_lock(&acm_bin_pol_rwlock);
-       chwall_ssidref = GET_SSIDREF(ACM_CHINESE_WALL_POLICY, ssidref);
-       /* roll-back: re-adjust conflicting types aggregate */
-       for (i=0; i<chwall_bin_pol.max_conflictsets; i++) {
-               int common = 0;
-               /* check if conflict_set_i and ssidref have common types */
-               for (j=0; j<chwall_bin_pol.max_types; j++)
-                       if 
(chwall_bin_pol.conflict_sets[i*chwall_bin_pol.max_types + j] &&
-                           
chwall_bin_pol.ssidrefs[chwall_ssidref*chwall_bin_pol.max_types + j]) {
-                               common = 1;
-                               break;
-                       }
-               if (common == 0)
-                       continue; /* try next conflict set, this one does not 
include any type of chwall_ssidref */
-               /* now add types of the conflict set to conflict_aggregate_set 
(except types in chwall_ssidref) */
-               for (j=0; j<chwall_bin_pol.max_types; j++)
-                       if 
(chwall_bin_pol.conflict_sets[i*chwall_bin_pol.max_types + j] &&
-                           
!chwall_bin_pol.ssidrefs[chwall_ssidref*chwall_bin_pol.max_types + j])
-                               chwall_bin_pol.conflict_aggregate_set[j]--;
-       }
-       read_unlock(&acm_bin_pol_rwlock);
-}
-
-
-static void
-chwall_post_domain_destroy(void *object_ssid, domid_t id) 
-{
-       int i,j;
-       struct chwall_ssid *chwall_ssidp = GET_SSIDP(ACM_CHINESE_WALL_POLICY, (struct acm_ssid_domain *)object_ssid);
-       ssidref_t chwall_ssidref = chwall_ssidp->chwall_ssidref;
-
-       traceprintk("%s.\n", __func__);
-
-       read_lock(&acm_bin_pol_rwlock);
-       /* adjust running types set */
-       for (i=0; i< chwall_bin_pol.max_types; i++)
-               chwall_bin_pol.running_types[i] -=
-                       chwall_bin_pol.ssidrefs[chwall_ssidref*chwall_bin_pol.max_types + i];
-
-       /* roll-back: re-adjust conflicting types aggregate */
-       for (i=0; i<chwall_bin_pol.max_conflictsets; i++) {
-               int common = 0;
-               /* check if conflict_set_i and ssidref have common types */
-               for (j=0; j<chwall_bin_pol.max_types; j++)
-                       if (chwall_bin_pol.conflict_sets[i*chwall_bin_pol.max_types + j] &&
-                           chwall_bin_pol.ssidrefs[chwall_ssidref*chwall_bin_pol.max_types + j]) {
-                               common = 1;
-                               break;
-                       }
-               if (common == 0)
-                       continue; /* try next conflict set, this one does not include any type of chwall_ssidref */
-               /* now add types of the conflict set to conflict_aggregate_set (except types in chwall_ssidref) */
-               for (j=0; j<chwall_bin_pol.max_types; j++)
-                       if (chwall_bin_pol.conflict_sets[i*chwall_bin_pol.max_types + j] &&
-                           !chwall_bin_pol.ssidrefs[chwall_ssidref*chwall_bin_pol.max_types + j])
-                               chwall_bin_pol.conflict_aggregate_set[j]--;
-       }
-       read_unlock(&acm_bin_pol_rwlock);
-       return;
+    int i, j;
+    ssidref_t chwall_ssidref;
+    traceprintk("%s.\n", __func__);
+
+    read_lock(&acm_bin_pol_rwlock);
+    chwall_ssidref = GET_SSIDREF(ACM_CHINESE_WALL_POLICY, ssidref);
+    /* roll-back: re-adjust conflicting types aggregate */
+    for (i = 0; i < chwall_bin_pol.max_conflictsets; i++)
+    {
+        int common = 0;
+        /* check if conflict_set_i and ssidref have common types */
+        for (j = 0; j < chwall_bin_pol.max_types; j++)
+            if (chwall_bin_pol.
+                conflict_sets[i * chwall_bin_pol.max_types + j]
+                && chwall_bin_pol.ssidrefs[chwall_ssidref *
+                                          chwall_bin_pol.max_types + j])
+            {
+                common = 1;
+                break;
+            }
+        if (common == 0)
+            continue;           /* try next conflict set, this one does not include any type of chwall_ssidref */
+        /* now add types of the conflict set to conflict_aggregate_set (except types in chwall_ssidref) */
+        for (j = 0; j < chwall_bin_pol.max_types; j++)
+            if (chwall_bin_pol.
+                conflict_sets[i * chwall_bin_pol.max_types + j]
+                && !chwall_bin_pol.ssidrefs[chwall_ssidref *
+                                           chwall_bin_pol.max_types + j])
+                chwall_bin_pol.conflict_aggregate_set[j]--;
+    }
+    read_unlock(&acm_bin_pol_rwlock);
+}
+
+
+static void chwall_post_domain_destroy(void *object_ssid, domid_t id)
+{
+    int i, j;
+    struct chwall_ssid *chwall_ssidp = GET_SSIDP(ACM_CHINESE_WALL_POLICY,
+                                                 (struct acm_ssid_domain *)
+                                                 object_ssid);
+    ssidref_t chwall_ssidref = chwall_ssidp->chwall_ssidref;
+
+    traceprintk("%s.\n", __func__);
+
+    read_lock(&acm_bin_pol_rwlock);
+    /* adjust running types set */
+    for (i = 0; i < chwall_bin_pol.max_types; i++)
+        chwall_bin_pol.running_types[i] -=
+            chwall_bin_pol.ssidrefs[chwall_ssidref *
+                                   chwall_bin_pol.max_types + i];
+
+    /* roll-back: re-adjust conflicting types aggregate */
+    for (i = 0; i < chwall_bin_pol.max_conflictsets; i++)
+    {
+        int common = 0;
+        /* check if conflict_set_i and ssidref have common types */
+        for (j = 0; j < chwall_bin_pol.max_types; j++)
+            if (chwall_bin_pol.
+                conflict_sets[i * chwall_bin_pol.max_types + j]
+                && chwall_bin_pol.ssidrefs[chwall_ssidref *
+                                          chwall_bin_pol.max_types + j])
+            {
+                common = 1;
+                break;
+            }
+        if (common == 0)
+            continue;           /* try next conflict set, this one does not include any type of chwall_ssidref */
+        /* now add types of the conflict set to conflict_aggregate_set (except types in chwall_ssidref) */
+        for (j = 0; j < chwall_bin_pol.max_types; j++)
+            if (chwall_bin_pol.
+                conflict_sets[i * chwall_bin_pol.max_types + j]
+                && !chwall_bin_pol.ssidrefs[chwall_ssidref *
+                                           chwall_bin_pol.max_types + j])
+                chwall_bin_pol.conflict_aggregate_set[j]--;
+    }
+    read_unlock(&acm_bin_pol_rwlock);
+    return;
 }
 
 struct acm_operations acm_chinesewall_ops = {
-       /* policy management services */
-       .init_domain_ssid               = chwall_init_domain_ssid,
-       .free_domain_ssid               = chwall_free_domain_ssid,
-       .dump_binary_policy             = chwall_dump_policy,
-       .set_binary_policy              = chwall_set_policy,
-       .dump_statistics                = chwall_dump_stats,
-    .dump_ssid_types        = chwall_dump_ssid_types,
-       /* domain management control hooks */
-       .pre_domain_create              = chwall_pre_domain_create,
-       .post_domain_create             = chwall_post_domain_create,
-       .fail_domain_create             = chwall_fail_domain_create,
-       .post_domain_destroy            = chwall_post_domain_destroy,
-       /* event channel control hooks */
-       .pre_eventchannel_unbound       = NULL,
-       .fail_eventchannel_unbound      = NULL,
-       .pre_eventchannel_interdomain   = NULL,
-       .fail_eventchannel_interdomain  = NULL,
-       /* grant table control hooks */
-       .pre_grant_map_ref              = NULL,
-       .fail_grant_map_ref             = NULL,
-       .pre_grant_setup                = NULL,
-       .fail_grant_setup               = NULL,
+    /* policy management services */
+    .init_domain_ssid = chwall_init_domain_ssid,
+    .free_domain_ssid = chwall_free_domain_ssid,
+    .dump_binary_policy = chwall_dump_policy,
+    .set_binary_policy = chwall_set_policy,
+    .dump_statistics = chwall_dump_stats,
+    .dump_ssid_types = chwall_dump_ssid_types,
+    /* domain management control hooks */
+    .pre_domain_create = chwall_pre_domain_create,
+    .post_domain_create = chwall_post_domain_create,
+    .fail_domain_create = chwall_fail_domain_create,
+    .post_domain_destroy = chwall_post_domain_destroy,
+    /* event channel control hooks */
+    .pre_eventchannel_unbound = NULL,
+    .fail_eventchannel_unbound = NULL,
+    .pre_eventchannel_interdomain = NULL,
+    .fail_eventchannel_interdomain = NULL,
+    /* grant table control hooks */
+    .pre_grant_map_ref = NULL,
+    .fail_grant_map_ref = NULL,
+    .pre_grant_setup = NULL,
+    .fail_grant_setup = NULL,
+    /* generic domain-requested decision hooks */
+    .sharing = NULL,
 };
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
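
As an aside, the conflict-aggregate bookkeeping that chwall_fail_domain_create and chwall_post_domain_destroy share above boils down to the following standalone sketch. The fixed sizes and array names are hypothetical stand-ins for the chwall_bin_pol fields, shown only to make the two-pass loop easier to follow:

    /*
     * Pass 1: does the departing domain run any type of conflict set i?
     * Pass 2: if so, decrement the aggregate count of every type in that
     *         set which the domain itself does not carry.
     */
    #define MAX_TYPES        4
    #define MAX_CONFLICTSETS 2

    static int conflict_sets[MAX_CONFLICTSETS][MAX_TYPES] = {
        { 1, 1, 0, 0 },                /* types 0 and 1 conflict */
        { 0, 0, 1, 1 },                /* types 2 and 3 conflict */
    };
    static int conflict_aggregate_set[MAX_TYPES];

    static void aggregate_rollback(const int ssid_types[MAX_TYPES])
    {
        int i, j, common;

        for (i = 0; i < MAX_CONFLICTSETS; i++) {
            common = 0;
            for (j = 0; j < MAX_TYPES; j++)
                if (conflict_sets[i][j] && ssid_types[j]) {
                    common = 1;
                    break;
                }
            if (!common)
                continue;              /* set untouched by this domain */
            for (j = 0; j < MAX_TYPES; j++)
                if (conflict_sets[i][j] && !ssid_types[j])
                    conflict_aggregate_set[j]--;
        }
    }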
diff -r 19af31a59537 -r f31494465fb0 xen/acm/acm_core.c
--- a/xen/acm/acm_core.c        Fri Oct 21 11:06:17 2005
+++ b/xen/acm/acm_core.c        Fri Oct 21 11:07:14 2005
@@ -47,7 +47,7 @@
 void acm_init_ste_policy(void);
 
 extern struct acm_operations acm_chinesewall_ops, 
-       acm_simple_type_enforcement_ops, acm_null_ops;
+    acm_simple_type_enforcement_ops, acm_null_ops;
 
 /* global ops structs called by the hooks */
 struct acm_operations *acm_primary_ops = NULL;
@@ -66,7 +66,7 @@
     u32 test = 1;
     if (*((u8 *)&test) == 1)
     {
-       printk("ACM module running in LITTLE ENDIAN.\n");
+        printk("ACM module running in LITTLE ENDIAN.\n");
         little_endian = 1;
     }
     else
@@ -80,10 +80,10 @@
 static void
 acm_init_binary_policy(void *primary, void *secondary)
 {
-       acm_bin_pol.primary_policy_code = 0;
-       acm_bin_pol.secondary_policy_code = 0;
-       acm_bin_pol.primary_binary_policy = primary;
-       acm_bin_pol.secondary_binary_policy = secondary;
+    acm_bin_pol.primary_policy_code = 0;
+    acm_bin_pol.secondary_policy_code = 0;
+    acm_bin_pol.primary_binary_policy = primary;
+    acm_bin_pol.secondary_binary_policy = secondary;
 }
 
 static int
@@ -96,7 +96,7 @@
     int rc = ACM_OK;
 
     if (mbi->mods_count > 1)
-           *initrdidx = 1;
+        *initrdidx = 1;
 
     /*
      * Try all modules and see whichever could be the binary policy.
@@ -115,14 +115,14 @@
 #error Architecture unsupported by sHype
 #endif
         _policy_len   = mod[i].mod_end - mod[i].mod_start;
-       if (_policy_len < sizeof(struct acm_policy_buffer))
-               continue; /* not a policy */
+        if (_policy_len < sizeof(struct acm_policy_buffer))
+            continue; /* not a policy */
 
         pol = (struct acm_policy_buffer *)_policy_start;
         if (ntohl(pol->magic) == ACM_MAGIC)
         {
             rc = acm_set_policy((void *)_policy_start,
-                                (u16)_policy_len,
+                                (u32)_policy_len,
                                 0);
             if (rc == ACM_OK)
             {
@@ -145,7 +145,7 @@
             }
             else
             {
-               printk("Invalid policy. %d.th module line.\n", i+1);
+                printk("Invalid policy. %d.th module line.\n", i+1);
             }
         } /* end if a binary policy definition, i.e., (ntohl(pol->magic) == ACM_MAGIC ) */
     }
@@ -158,10 +158,10 @@
          const multiboot_info_t *mbi,
          unsigned long initial_images_start)
 {
-       int ret = ACM_OK;
+    int ret = ACM_OK;
 
     acm_set_endian();
-       write_lock(&acm_bin_pol_rwlock);
+    write_lock(&acm_bin_pol_rwlock);
     acm_init_binary_policy(NULL, NULL);
 
     /* set primary policy component */
@@ -170,14 +170,14 @@
 
     case ACM_CHINESE_WALL_POLICY:
         acm_init_chwall_policy();
-               acm_bin_pol.primary_policy_code = ACM_CHINESE_WALL_POLICY;
-               acm_primary_ops = &acm_chinesewall_ops;
+        acm_bin_pol.primary_policy_code = ACM_CHINESE_WALL_POLICY;
+        acm_primary_ops = &acm_chinesewall_ops;
         break;
 
     case ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY:
         acm_init_ste_policy();
-               acm_bin_pol.primary_policy_code = ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY;
-               acm_primary_ops = &acm_simple_type_enforcement_ops;
+        acm_bin_pol.primary_policy_code = ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY;
+        acm_primary_ops = &acm_simple_type_enforcement_ops;
         break;
 
     default:
@@ -190,9 +190,9 @@
     /* secondary policy component part */
     switch ((ACM_USE_SECURITY_POLICY) >> 4) {
     case ACM_NULL_POLICY:
-               acm_bin_pol.secondary_policy_code = ACM_NULL_POLICY;
-               acm_secondary_ops = &acm_null_ops;
-               break;
+        acm_bin_pol.secondary_policy_code = ACM_NULL_POLICY;
+        acm_secondary_ops = &acm_null_ops;
+        break;
 
     case ACM_CHINESE_WALL_POLICY:
         if (acm_bin_pol.primary_policy_code == ACM_CHINESE_WALL_POLICY)
@@ -200,9 +200,9 @@
             ret = -EINVAL;
             goto out;
         }
-               acm_init_chwall_policy();
+        acm_init_chwall_policy();
         acm_bin_pol.secondary_policy_code = ACM_CHINESE_WALL_POLICY;
-               acm_secondary_ops = &acm_chinesewall_ops;
+        acm_secondary_ops = &acm_chinesewall_ops;
         break;
 
     case ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY:
@@ -211,9 +211,9 @@
             ret = -EINVAL;
             goto out;
         }
-               acm_init_ste_policy();
-               acm_bin_pol.secondary_policy_code = ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY;
-               acm_secondary_ops = &acm_simple_type_enforcement_ops;
+        acm_init_ste_policy();
+        acm_bin_pol.secondary_policy_code = ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY;
+        acm_secondary_ops = &acm_simple_type_enforcement_ops;
         break;
 
     default:
@@ -222,96 +222,106 @@
     }
 
  out:
-       write_unlock(&acm_bin_pol_rwlock);
-
-       if (ret != ACM_OK)
-    {
-        printk("%s: Error setting policies.\n", __func__);
+    write_unlock(&acm_bin_pol_rwlock);
+
+    if (ret != ACM_OK)
+    {
+        printk("%s: Error initializing policies.\n", __func__);
         /* here one could imagine a clean panic */
-               return -EINVAL;
-       }
-       acm_setup(initrdidx, mbi, initial_images_start);
-       printk("%s: Enforcing Primary %s, Secondary %s.\n", __func__, 
-              ACM_POLICY_NAME(acm_bin_pol.primary_policy_code),
+        return -EINVAL;
+    }
+    if (acm_setup(initrdidx, mbi, initial_images_start) != ACM_OK)
+    {
+        printk("%s: Error loading policy at boot time.\n", __func__);
+        /* ignore, just continue with the minimal hardcoded startup policy */
+    }
+    printk("%s: Enforcing Primary %s, Secondary %s.\n", __func__, 
+           ACM_POLICY_NAME(acm_bin_pol.primary_policy_code),
            ACM_POLICY_NAME(acm_bin_pol.secondary_policy_code));
-       return ret;
+    return ret;
 }
 
 int
 acm_init_domain_ssid(domid_t id, ssidref_t ssidref)
 {
-       struct acm_ssid_domain *ssid;
-       struct domain *subj = find_domain_by_id(id);
-       int ret1, ret2;
-       
-       if (subj == NULL)
-    {
-               printk("%s: ACM_NULL_POINTER ERROR (id=%x).\n", __func__, id);
-               return ACM_NULL_POINTER_ERROR;
-       }
-       if ((ssid = xmalloc(struct acm_ssid_domain)) == NULL)
-               return ACM_INIT_SSID_ERROR;
-
-       ssid->datatype       = DOMAIN;
-       ssid->subject        = subj;
-       ssid->domainid       = subj->domain_id;
-       ssid->primary_ssid   = NULL;
-       ssid->secondary_ssid = NULL;
-
-       if (ACM_USE_SECURITY_POLICY != ACM_NULL_POLICY)
-               ssid->ssidref = ssidref;
-       else
-               ssid->ssidref = ACM_DEFAULT_SSID;
-
-       subj->ssid           = ssid;
-       /* now fill in primary and secondary parts; we only get here through hooks */
-       if (acm_primary_ops->init_domain_ssid != NULL)
-               ret1 = acm_primary_ops->init_domain_ssid(&(ssid->primary_ssid), ssidref);
-       else
-               ret1 = ACM_OK;
-
-       if (acm_secondary_ops->init_domain_ssid != NULL)
-               ret2 = acm_secondary_ops->init_domain_ssid(&(ssid->secondary_ssid), ssidref);
-       else
-               ret2 = ACM_OK;
-
-       if ((ret1 != ACM_OK) || (ret2 != ACM_OK))
-    {
-               printk("%s: ERROR instantiating individual ssids for domain 0x%02x.\n",
-                      __func__, subj->domain_id);
-               acm_free_domain_ssid(ssid);     
-               put_domain(subj);
-               return ACM_INIT_SSID_ERROR;
-       }
-       printk("%s: assigned domain %x the ssidref=%x.\n",
+    struct acm_ssid_domain *ssid;
+    struct domain *subj = find_domain_by_id(id);
+    int ret1, ret2;
+ 
+    if (subj == NULL)
+    {
+        printk("%s: ACM_NULL_POINTER ERROR (id=%x).\n", __func__, id);
+        return ACM_NULL_POINTER_ERROR;
+    }
+    if ((ssid = xmalloc(struct acm_ssid_domain)) == NULL)
+        return ACM_INIT_SSID_ERROR;
+
+    ssid->datatype       = DOMAIN;
+    ssid->subject        = subj;
+    ssid->domainid       = subj->domain_id;
+    ssid->primary_ssid   = NULL;
+    ssid->secondary_ssid = NULL;
+
+    if (ACM_USE_SECURITY_POLICY != ACM_NULL_POLICY)
+        ssid->ssidref = ssidref;
+    else
+        ssid->ssidref = ACM_DEFAULT_SSID;
+
+    subj->ssid           = ssid;
+    /* now fill in primary and secondary parts; we only get here through hooks */
+    if (acm_primary_ops->init_domain_ssid != NULL)
+        ret1 = acm_primary_ops->init_domain_ssid(&(ssid->primary_ssid), ssidref);
+    else
+        ret1 = ACM_OK;
+
+    if (acm_secondary_ops->init_domain_ssid != NULL)
+        ret2 = acm_secondary_ops->init_domain_ssid(&(ssid->secondary_ssid), ssidref);
+    else
+        ret2 = ACM_OK;
+
+    if ((ret1 != ACM_OK) || (ret2 != ACM_OK))
+    {
+        printk("%s: ERROR instantiating individual ssids for domain 0x%02x.\n",
+               __func__, subj->domain_id);
+        acm_free_domain_ssid(ssid); 
+        put_domain(subj);
+        return ACM_INIT_SSID_ERROR;
+    }
+    printk("%s: assigned domain %x the ssidref=%x.\n",
            __func__, id, ssid->ssidref);
-       put_domain(subj);
-       return ACM_OK;
-}
-
-
-int
+    put_domain(subj);
+    return ACM_OK;
+}
+
+
+void
 acm_free_domain_ssid(struct acm_ssid_domain *ssid)
 {
-       domid_t id;
-
-       /* domain is already gone, just ssid is left */
-       if (ssid == NULL)
-    {
-               printk("%s: ACM_NULL_POINTER ERROR.\n", __func__);
-               return ACM_NULL_POINTER_ERROR;
-       }
-    id = ssid->domainid;
-       ssid->subject        = NULL;
-
-       if (acm_primary_ops->free_domain_ssid != NULL) /* null policy */
-               acm_primary_ops->free_domain_ssid(ssid->primary_ssid);
-       ssid->primary_ssid = NULL;
-       if (acm_secondary_ops->free_domain_ssid != NULL)
-               acm_secondary_ops->free_domain_ssid(ssid->secondary_ssid);
-       ssid->secondary_ssid = NULL;
-       xfree(ssid);
-       printkd("%s: Freed individual domain ssid (domain=%02x).\n",
+    domid_t id;
+
+    /* domain is already gone, just ssid is left */
+    if (ssid == NULL)
+        return;
+
+    id = ssid->domainid; /* saved for printkd below; ssid is freed first */
+    ssid->subject = NULL;
+    if (acm_primary_ops->free_domain_ssid != NULL) /* null policy */
+        acm_primary_ops->free_domain_ssid(ssid->primary_ssid);
+    ssid->primary_ssid = NULL;
+    if (acm_secondary_ops->free_domain_ssid != NULL)
+        acm_secondary_ops->free_domain_ssid(ssid->secondary_ssid);
+    ssid->secondary_ssid = NULL;
+    xfree(ssid);
+    printkd("%s: Freed individual domain ssid (domain=%02x).\n",
             __func__, id);
-       return ACM_OK;
-}
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
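
One detail worth noting in the acm_core.c hunks above: the secondary policy is selected by shifting ACM_USE_SECURITY_POLICY right by four bits, which suggests the build-time constant packs two policy codes into one value (primary presumably in the low nibble, secondary in the high nibble). A minimal sketch under that assumption -- the names here are illustrative, not the ACM headers:

    #include <stdio.h>

    enum { POLICY_NULL = 0, POLICY_CHWALL = 1, POLICY_STE = 2 };

    /* Hypothetical encoding: secondary in bits 4-7, primary in bits 0-3. */
    #define MAKE_POLICY(primary, secondary) (((secondary) << 4) | (primary))

    int main(void)
    {
        unsigned int cfg = MAKE_POLICY(POLICY_CHWALL, POLICY_STE);

        printf("primary=%u secondary=%u\n", cfg & 0x0f, cfg >> 4);
        return 0;
    }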
diff -r 19af31a59537 -r f31494465fb0 xen/acm/acm_null_hooks.c
--- a/xen/acm/acm_null_hooks.c  Fri Oct 21 11:06:17 2005
+++ b/xen/acm/acm_null_hooks.c  Fri Oct 21 11:07:14 2005
@@ -11,37 +11,38 @@
  * published by the Free Software Foundation, version 2 of the
  * License.
  */
+
 #include <acm/acm_hooks.h>
 
 static int
 null_init_domain_ssid(void **ssid, ssidref_t ssidref)
 {
-       return ACM_OK;
+    return ACM_OK;
 }
 
 static void
 null_free_domain_ssid(void *ssid)
 {
-       return;
+    return;
 }
 
 static int
-null_dump_binary_policy(u8 *buf, u16 buf_size) 
-{      
-       return 0;
+null_dump_binary_policy(u8 *buf, u32 buf_size)
+{ 
+    return 0;
 }
 
 static int
-null_set_binary_policy(u8 *buf, u16 buf_size) 
-{      
-       return ACM_OK;
+null_set_binary_policy(u8 *buf, u32 buf_size)
+{ 
+    return ACM_OK;
 }
-       
+ 
 static int 
 null_dump_stats(u8 *buf, u16 buf_size)
 {
-       /* no stats for NULL policy */
-       return 0;
+    /* no stats for NULL policy */
+    return 0;
 }
 
 static int
@@ -54,25 +55,35 @@
 
 /* now define the hook structure similarly to LSM */
 struct acm_operations acm_null_ops = {
-       .init_domain_ssid               = null_init_domain_ssid,
-       .free_domain_ssid               = null_free_domain_ssid,
-       .dump_binary_policy             = null_dump_binary_policy,
-       .set_binary_policy              = null_set_binary_policy,
-       .dump_statistics                = null_dump_stats,
-    .dump_ssid_types        = null_dump_ssid_types,
-       /* domain management control hooks */
-       .pre_domain_create              = NULL,
-       .post_domain_create             = NULL,
-       .fail_domain_create             = NULL,
-       .post_domain_destroy            = NULL,
-       /* event channel control hooks */
-       .pre_eventchannel_unbound       = NULL,
-       .fail_eventchannel_unbound      = NULL,
-       .pre_eventchannel_interdomain   = NULL,
-       .fail_eventchannel_interdomain  = NULL,
-       /* grant table control hooks */
-       .pre_grant_map_ref              = NULL,
-       .fail_grant_map_ref             = NULL,
-       .pre_grant_setup                = NULL,
-       .fail_grant_setup               = NULL
+    .init_domain_ssid = null_init_domain_ssid,
+    .free_domain_ssid = null_free_domain_ssid,
+    .dump_binary_policy = null_dump_binary_policy,
+    .set_binary_policy = null_set_binary_policy,
+    .dump_statistics = null_dump_stats,
+    .dump_ssid_types = null_dump_ssid_types,
+    /* domain management control hooks */
+    .pre_domain_create = NULL,
+    .post_domain_create = NULL,
+    .fail_domain_create = NULL,
+    .post_domain_destroy = NULL,
+    /* event channel control hooks */
+    .pre_eventchannel_unbound = NULL,
+    .fail_eventchannel_unbound = NULL,
+    .pre_eventchannel_interdomain = NULL,
+    .fail_eventchannel_interdomain = NULL,
+    /* grant table control hooks */
+    .pre_grant_map_ref = NULL,
+    .fail_grant_map_ref = NULL,
+    .pre_grant_setup = NULL,
+    .fail_grant_setup = NULL
 };
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
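
The null policy above is the degenerate case: the mandatory dump/set hooks return 0 or ACM_OK and every control hook is NULL, so callers must guard before dispatching. A minimal sketch of that calling convention (the struct and names are illustrative only, not the ACM hook table):

    #include <stddef.h>

    struct hook_table {
        int (*pre_domain_create)(unsigned int ssidref);
    };

    /* A NULL hook expresses "no opinion", which the caller treats as permit. */
    static int call_pre_domain_create(const struct hook_table *h,
                                      unsigned int ssidref)
    {
        if (h->pre_domain_create == NULL)
            return 0; /* ACM_OK-style success */
        return h->pre_domain_create(ssidref);
    }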
diff -r 19af31a59537 -r f31494465fb0 xen/acm/acm_policy.c
--- a/xen/acm/acm_policy.c      Fri Oct 21 11:06:17 2005
+++ b/xen/acm/acm_policy.c      Fri Oct 21 11:07:14 2005
@@ -32,165 +32,166 @@
 #include <acm/acm_endian.h>
 
 int
-acm_set_policy(void *buf, u16 buf_size, int isuserbuffer)
+acm_set_policy(void *buf, u32 buf_size, int isuserbuffer)
 {
-       u8 *policy_buffer = NULL;
-       struct acm_policy_buffer *pol;
-       
+    u8 *policy_buffer = NULL;
+    struct acm_policy_buffer *pol;
+ 
     if (buf_size < sizeof(struct acm_policy_buffer))
-               return -EFAULT;
-
-       /* 1. copy buffer from domain */
-       if ((policy_buffer = xmalloc_array(u8, buf_size)) == NULL)
-           return -ENOMEM;
-
-       if (isuserbuffer) {
-               if (copy_from_user(policy_buffer, buf, buf_size))
+        return -EFAULT;
+
+    /* 1. copy buffer from domain */
+    if ((policy_buffer = xmalloc_array(u8, buf_size)) == NULL)
+        return -ENOMEM;
+
+    if (isuserbuffer) {
+        if (copy_from_user(policy_buffer, buf, buf_size))
         {
-                       printk("%s: Error copying!\n",__func__);
-                       goto error_free;
-               }
-       } else
-               memcpy(policy_buffer, buf, buf_size);
-
-       /* 2. some sanity checking */
-       pol = (struct acm_policy_buffer *)policy_buffer;
-
-       if ((ntohl(pol->magic) != ACM_MAGIC) || 
-           (ntohl(pol->policy_version) != ACM_POLICY_VERSION) ||
-           (ntohl(pol->primary_policy_code) != acm_bin_pol.primary_policy_code) ||
-           (ntohl(pol->secondary_policy_code) != acm_bin_pol.secondary_policy_code))
+            printk("%s: Error copying!\n",__func__);
+            goto error_free;
+        }
+    } else
+        memcpy(policy_buffer, buf, buf_size);
+
+    /* 2. some sanity checking */
+    pol = (struct acm_policy_buffer *)policy_buffer;
+
+    if ((ntohl(pol->magic) != ACM_MAGIC) || 
+        (ntohl(pol->policy_version) != ACM_POLICY_VERSION) ||
+        (ntohl(pol->primary_policy_code) != acm_bin_pol.primary_policy_code) ||
+        (ntohl(pol->secondary_policy_code) != acm_bin_pol.secondary_policy_code))
     {
-               printkd("%s: Wrong policy magics or versions!\n", __func__);
-               goto error_free;
-       }
-       if (buf_size != ntohl(pol->len))
+        printkd("%s: Wrong policy magics or versions!\n", __func__);
+        goto error_free;
+    }
+    if (buf_size != ntohl(pol->len))
     {
-               printk("%s: ERROR in buf size.\n", __func__);
-               goto error_free;
-       }
-
-       /* get bin_policy lock and rewrite policy (release old one) */
-       write_lock(&acm_bin_pol_rwlock);
-
-       /* 3. set primary policy data */
-       if (acm_primary_ops->set_binary_policy(buf + ntohl(pol->primary_buffer_offset),
-                                               ntohl(pol->secondary_buffer_offset) -
-                                              ntohl(pol->primary_buffer_offset)))
-               goto error_lock_free;
-
-       /* 4. set secondary policy data */
-       if (acm_secondary_ops->set_binary_policy(buf + ntohl(pol->secondary_buffer_offset),
-                                                ntohl(pol->len) -
-                                                ntohl(pol->secondary_buffer_offset)))
-               goto error_lock_free;
-
-       write_unlock(&acm_bin_pol_rwlock);
-       xfree(policy_buffer);
-       return ACM_OK;
+        printk("%s: ERROR in buf size.\n", __func__);
+        goto error_free;
+    }
+
+    /* get bin_policy lock and rewrite policy (release old one) */
+    write_lock(&acm_bin_pol_rwlock);
+
+    /* 3. set primary policy data */
+    if (acm_primary_ops->set_binary_policy(buf + ntohl(pol->primary_buffer_offset),
+                                           ntohl(pol->secondary_buffer_offset) -
+                                           ntohl(pol->primary_buffer_offset)))
+        goto error_lock_free;
+
+    /* 4. set secondary policy data */
+    if (acm_secondary_ops->set_binary_policy(buf + ntohl(pol->secondary_buffer_offset),
+                                             ntohl(pol->len) -
+                                             ntohl(pol->secondary_buffer_offset)))
+        goto error_lock_free;
+
+    write_unlock(&acm_bin_pol_rwlock);
+    xfree(policy_buffer);
+    return ACM_OK;
 
  error_lock_free:
-       write_unlock(&acm_bin_pol_rwlock);
+    write_unlock(&acm_bin_pol_rwlock);
  error_free:
-       printk("%s: Error setting policy.\n", __func__);
-    xfree(policy_buffer);
-       return -EFAULT;
-}
-
-int
-acm_get_policy(void *buf, u16 buf_size)
-{      
-     u8 *policy_buffer;
-     int ret;
-     struct acm_policy_buffer *bin_pol;
-       
+    printk("%s: Error setting policy.\n", __func__);
+    xfree(policy_buffer);
+    return -EFAULT;
+}
+
+int
+acm_get_policy(void *buf, u32 buf_size)
+{ 
+    u8 *policy_buffer;
+    int ret;
+    struct acm_policy_buffer *bin_pol;
+ 
     if (buf_size < sizeof(struct acm_policy_buffer))
-               return -EFAULT;
-
-     if ((policy_buffer = xmalloc_array(u8, buf_size)) == NULL)
-           return -ENOMEM;
-
-     read_lock(&acm_bin_pol_rwlock);
-
-     bin_pol = (struct acm_policy_buffer *)policy_buffer;
-     bin_pol->magic = htonl(ACM_MAGIC);
-     bin_pol->primary_policy_code = htonl(acm_bin_pol.primary_policy_code);
-     bin_pol->secondary_policy_code = htonl(acm_bin_pol.secondary_policy_code);
-
-     bin_pol->len = htonl(sizeof(struct acm_policy_buffer));
-     bin_pol->primary_buffer_offset = htonl(ntohl(bin_pol->len));
-     bin_pol->secondary_buffer_offset = htonl(ntohl(bin_pol->len));
+        return -EFAULT;
+
+    if ((policy_buffer = xmalloc_array(u8, buf_size)) == NULL)
+        return -ENOMEM;
+
+    read_lock(&acm_bin_pol_rwlock);
+
+    bin_pol = (struct acm_policy_buffer *)policy_buffer;
+    bin_pol->magic = htonl(ACM_MAGIC);
+    bin_pol->primary_policy_code = htonl(acm_bin_pol.primary_policy_code);
+    bin_pol->secondary_policy_code = htonl(acm_bin_pol.secondary_policy_code);
+
+    bin_pol->len = htonl(sizeof(struct acm_policy_buffer));
+    bin_pol->primary_buffer_offset = htonl(ntohl(bin_pol->len));
+    bin_pol->secondary_buffer_offset = htonl(ntohl(bin_pol->len));
      
-     ret = acm_primary_ops->dump_binary_policy (policy_buffer + ntohl(bin_pol->primary_buffer_offset),
-                                      buf_size - ntohl(bin_pol->primary_buffer_offset));
-     if (ret < 0)
-         goto error_free_unlock;
-
-     bin_pol->len = htonl(ntohl(bin_pol->len) + ret);
-     bin_pol->secondary_buffer_offset = htonl(ntohl(bin_pol->len));
-
-     ret = acm_secondary_ops->dump_binary_policy(policy_buffer + ntohl(bin_pol->secondary_buffer_offset),
-                                   buf_size - ntohl(bin_pol->secondary_buffer_offset));
-     if (ret < 0)
-         goto error_free_unlock;
-
-     bin_pol->len = htonl(ntohl(bin_pol->len) + ret);
-     if (copy_to_user(buf, policy_buffer, ntohl(bin_pol->len)))
-            goto error_free_unlock;
-
-     read_unlock(&acm_bin_pol_rwlock);
-     xfree(policy_buffer);
-     return ACM_OK;
+    ret = acm_primary_ops->dump_binary_policy (policy_buffer + ntohl(bin_pol->primary_buffer_offset),
+                                               buf_size - ntohl(bin_pol->primary_buffer_offset));
+    if (ret < 0)
+        goto error_free_unlock;
+
+    bin_pol->len = htonl(ntohl(bin_pol->len) + ret);
+    bin_pol->secondary_buffer_offset = htonl(ntohl(bin_pol->len));
+
+    ret = acm_secondary_ops->dump_binary_policy(policy_buffer + ntohl(bin_pol->secondary_buffer_offset),
+                                                buf_size - ntohl(bin_pol->secondary_buffer_offset));
+    if (ret < 0)
+        goto error_free_unlock;
+
+    bin_pol->len = htonl(ntohl(bin_pol->len) + ret);
+    if (copy_to_user(buf, policy_buffer, ntohl(bin_pol->len)))
+        goto error_free_unlock;
+
+    read_unlock(&acm_bin_pol_rwlock);
+    xfree(policy_buffer);
+    return ACM_OK;
 
  error_free_unlock:
-     read_unlock(&acm_bin_pol_rwlock);
-     printk("%s: Error getting policy.\n", __func__);
-     xfree(policy_buffer);
-     return -EFAULT;
+    read_unlock(&acm_bin_pol_rwlock);
+    printk("%s: Error getting policy.\n", __func__);
+    xfree(policy_buffer);
+    return -EFAULT;
 }
 
 int
 acm_dump_statistics(void *buf, u16 buf_size)
-{      
+{ 
     /* send stats to user space */
-     u8 *stats_buffer;
-     int len1, len2;
-     struct acm_stats_buffer acm_stats;
-
-     if ((stats_buffer = xmalloc_array(u8, buf_size)) == NULL)
-           return -ENOMEM;
-
-     read_lock(&acm_bin_pol_rwlock);
+    u8 *stats_buffer;
+    int len1, len2;
+    struct acm_stats_buffer acm_stats;
+
+    if ((stats_buffer = xmalloc_array(u8, buf_size)) == NULL)
+        return -ENOMEM;
+
+    read_lock(&acm_bin_pol_rwlock);
      
-     len1 = acm_primary_ops->dump_statistics(stats_buffer + sizeof(struct acm_stats_buffer),
-                                            buf_size - sizeof(struct acm_stats_buffer));
-     if (len1 < 0)
-            goto error_lock_free;
-            
-     len2 = acm_secondary_ops->dump_statistics(stats_buffer + sizeof(struct acm_stats_buffer) + len1,
-                                              buf_size - sizeof(struct acm_stats_buffer) - len1);
-     if (len2 < 0)
-            goto error_lock_free;
-
-     acm_stats.magic = htonl(ACM_MAGIC);
-     acm_stats.primary_policy_code = htonl(acm_bin_pol.primary_policy_code);
-     acm_stats.secondary_policy_code = htonl(acm_bin_pol.secondary_policy_code);
-     acm_stats.primary_stats_offset = htonl(sizeof(struct acm_stats_buffer));
-     acm_stats.secondary_stats_offset = htonl(sizeof(struct acm_stats_buffer) + len1);
-     acm_stats.len = htonl(sizeof(struct acm_stats_buffer) + len1 + len2);
-     memcpy(stats_buffer, &acm_stats, sizeof(struct acm_stats_buffer));
-
-     if (copy_to_user(buf, stats_buffer, sizeof(struct acm_stats_buffer) + len1 + len2))
-            goto error_lock_free;
-
-     read_unlock(&acm_bin_pol_rwlock);
-     xfree(stats_buffer);
-     return ACM_OK;
+    len1 = acm_primary_ops->dump_statistics(stats_buffer + sizeof(struct acm_stats_buffer),
+                                            buf_size - sizeof(struct acm_stats_buffer));
+    if (len1 < 0)
+        goto error_lock_free;
+      
+    len2 = acm_secondary_ops->dump_statistics(stats_buffer + sizeof(struct acm_stats_buffer) + len1,
+                                              buf_size - sizeof(struct acm_stats_buffer) - len1);
+    if (len2 < 0)
+        goto error_lock_free;
+
+    acm_stats.magic = htonl(ACM_MAGIC);
+    acm_stats.primary_policy_code = htonl(acm_bin_pol.primary_policy_code);
+    acm_stats.secondary_policy_code = htonl(acm_bin_pol.secondary_policy_code);
+    acm_stats.primary_stats_offset = htonl(sizeof(struct acm_stats_buffer));
+    acm_stats.secondary_stats_offset = htonl(sizeof(struct acm_stats_buffer) + len1);
+    acm_stats.len = htonl(sizeof(struct acm_stats_buffer) + len1 + len2);
+
+    memcpy(stats_buffer, &acm_stats, sizeof(struct acm_stats_buffer));
+
+    if (copy_to_user(buf, stats_buffer, sizeof(struct acm_stats_buffer) + len1 + len2))
+        goto error_lock_free;
+
+    read_unlock(&acm_bin_pol_rwlock);
+    xfree(stats_buffer);
+    return ACM_OK;
 
  error_lock_free:
-     read_unlock(&acm_bin_pol_rwlock);
-     xfree(stats_buffer);
-     return -EFAULT;
+    read_unlock(&acm_bin_pol_rwlock);
+    xfree(stats_buffer);
+    return -EFAULT;
 }
 
 
@@ -198,57 +199,88 @@
 acm_get_ssid(ssidref_t ssidref, u8 *buf, u16 buf_size)
 {
     /* send stats to user space */
-     u8 *ssid_buffer;
-     int ret;
-     struct acm_ssid_buffer *acm_ssid;
-     if (buf_size < sizeof(struct acm_ssid_buffer))
-               return -EFAULT;
-
-     if ((ssid_buffer = xmalloc_array(u8, buf_size)) == NULL)
-           return -ENOMEM;
-
-     read_lock(&acm_bin_pol_rwlock);
-
-     acm_ssid = (struct acm_ssid_buffer *)ssid_buffer;
-     acm_ssid->len = sizeof(struct acm_ssid_buffer);
-     acm_ssid->ssidref = ssidref;
-     acm_ssid->primary_policy_code = acm_bin_pol.primary_policy_code;
-     acm_ssid->secondary_policy_code = acm_bin_pol.secondary_policy_code;
-     acm_ssid->primary_types_offset = acm_ssid->len;
-
-     /* ret >= 0 --> ret == max_types */
-     ret = acm_primary_ops->dump_ssid_types(ACM_PRIMARY(ssidref),
-                                            ssid_buffer + acm_ssid->primary_types_offset,
-                                            buf_size - acm_ssid->primary_types_offset);
-     if (ret < 0)
-         goto error_free_unlock;
-
-     acm_ssid->len += ret;
-     acm_ssid->primary_max_types = ret;
-
-     acm_ssid->secondary_types_offset = acm_ssid->len;
-
-     ret = acm_secondary_ops->dump_ssid_types(ACM_SECONDARY(ssidref),
-                                              ssid_buffer + acm_ssid->secondary_types_offset,
-                                              buf_size - acm_ssid->secondary_types_offset);
-     if (ret < 0)
-         goto error_free_unlock;
-
-     acm_ssid->len += ret;
-     acm_ssid->secondary_max_types = ret;
-
-     if (copy_to_user(buf, ssid_buffer, acm_ssid->len))
-            goto error_free_unlock;
-
-     read_unlock(&acm_bin_pol_rwlock);
-     xfree(ssid_buffer);
-     return ACM_OK;
+    u8 *ssid_buffer;
+    int ret;
+    struct acm_ssid_buffer *acm_ssid;
+    if (buf_size < sizeof(struct acm_ssid_buffer))
+        return -EFAULT;
+
+    if ((ssid_buffer = xmalloc_array(u8, buf_size)) == NULL)
+        return -ENOMEM;
+
+    read_lock(&acm_bin_pol_rwlock);
+
+    acm_ssid = (struct acm_ssid_buffer *)ssid_buffer;
+    acm_ssid->len = sizeof(struct acm_ssid_buffer);
+    acm_ssid->ssidref = ssidref;
+    acm_ssid->primary_policy_code = acm_bin_pol.primary_policy_code;
+    acm_ssid->secondary_policy_code = acm_bin_pol.secondary_policy_code;
+    acm_ssid->primary_types_offset = acm_ssid->len;
+
+    /* ret >= 0 --> ret == max_types */
+    ret = acm_primary_ops->dump_ssid_types(ACM_PRIMARY(ssidref),
+                                           ssid_buffer + acm_ssid->primary_types_offset,
+                                           buf_size - acm_ssid->primary_types_offset);
+    if (ret < 0)
+        goto error_free_unlock;
+
+    acm_ssid->len += ret;
+    acm_ssid->primary_max_types = ret;
+    acm_ssid->secondary_types_offset = acm_ssid->len;
+
+    ret = acm_secondary_ops->dump_ssid_types(ACM_SECONDARY(ssidref),
+                                             ssid_buffer + acm_ssid->secondary_types_offset,
+                                             buf_size - acm_ssid->secondary_types_offset);
+    if (ret < 0)
+        goto error_free_unlock;
+
+    acm_ssid->len += ret;
+    acm_ssid->secondary_max_types = ret;
+
+    if (copy_to_user(buf, ssid_buffer, acm_ssid->len))
+        goto error_free_unlock;
+
+    read_unlock(&acm_bin_pol_rwlock);
+    xfree(ssid_buffer);
+    return ACM_OK;
 
  error_free_unlock:
-     read_unlock(&acm_bin_pol_rwlock);
-     printk("%s: Error getting ssid.\n", __func__);
-     xfree(ssid_buffer);
-     return -ENOMEM;
-}
-
-/*eof*/
+    read_unlock(&acm_bin_pol_rwlock);
+    printk("%s: Error getting ssid.\n", __func__);
+    xfree(ssid_buffer);
+    return -ENOMEM;
+}
+
+int
+acm_get_decision(ssidref_t ssidref1, ssidref_t ssidref2,
+                 enum acm_hook_type hook)
+{
+    int ret = ACM_ACCESS_DENIED;
+    switch (hook) {
+
+    case SHARING:
+        /* SHARING Hook restricts access in STE policy only */
+        ret = acm_sharing(ssidref1, ssidref2);
+        break;
+
+    default:
+        /* deny */
+        break;
+    }
+
+    printkd("%s: ssid1=%x, ssid2=%x, decision=%s.\n",
+            __func__, ssidref1, ssidref2,
+            (ret == ACM_ACCESS_PERMITTED) ? "GRANTED" : "DENIED");
+
+    return ret;
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
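
A note on the buffer handling above: acm_get_policy() and acm_dump_statistics() store every multi-byte header field in network byte order (htonl) so user-space tools can parse the result on any host, and the offsets grow as each policy component is appended. A simplified user-space sketch of that header discipline (struct and field names are hypothetical, not the ACM wire format):

    #include <arpa/inet.h>   /* htonl() */
    #include <stdint.h>

    struct policy_hdr {
        uint32_t magic;
        uint32_t len;                      /* total bytes used so far  */
        uint32_t primary_buffer_offset;    /* where component 1 starts */
        uint32_t secondary_buffer_offset;  /* where component 2 starts */
    };

    static void init_hdr(struct policy_hdr *h, uint32_t magic)
    {
        h->magic = htonl(magic);
        h->len = htonl(sizeof(*h));           /* header only, for now    */
        h->primary_buffer_offset = h->len;    /* data follows the header */
        h->secondary_buffer_offset = h->len;  /* fixed up after comp. 1  */
    }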
diff -r 19af31a59537 -r f31494465fb0 xen/acm/acm_simple_type_enforcement_hooks.c
--- a/xen/acm/acm_simple_type_enforcement_hooks.c       Fri Oct 21 11:06:17 2005
+++ b/xen/acm/acm_simple_type_enforcement_hooks.c       Fri Oct 21 11:07:14 2005
@@ -24,6 +24,7 @@
  *     share at least on common type.
  *
  */
+
 #include <xen/lib.h>
 #include <asm/types.h>
 #include <asm/current.h>
@@ -35,34 +36,34 @@
 struct ste_binary_policy ste_bin_pol;
 
 static inline int have_common_type (ssidref_t ref1, ssidref_t ref2) {
-       int i;
-       for(i=0; i< ste_bin_pol.max_types; i++)
-               if ( ste_bin_pol.ssidrefs[ref1*ste_bin_pol.max_types + i] && 
-                    ste_bin_pol.ssidrefs[ref2*ste_bin_pol.max_types + i]) {
-                       printkd("%s: common type #%02x.\n", __func__, i);
-                       return 1;
-               }
-       return 0;
+    int i;
+    for(i=0; i< ste_bin_pol.max_types; i++)
+        if ( ste_bin_pol.ssidrefs[ref1*ste_bin_pol.max_types + i] && 
+             ste_bin_pol.ssidrefs[ref2*ste_bin_pol.max_types + i]) {
+            printkd("%s: common type #%02x.\n", __func__, i);
+            return 1;
+        }
+    return 0;
 }
 
 /* Helper function: return = (subj and obj share a common type) */
 static int share_common_type(struct domain *subj, struct domain *obj)
 {
-       ssidref_t ref_s, ref_o;
-       int ret;
-
-       if ((subj == NULL) || (obj == NULL) || (subj->ssid == NULL) || (obj->ssid == NULL))
-               return 0;
-       read_lock(&acm_bin_pol_rwlock);
-       /* lookup the policy-local ssids */
-       ref_s = ((struct ste_ssid *)(GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY, 
-                                   (struct acm_ssid_domain *)subj->ssid)))->ste_ssidref;
-       ref_o = ((struct ste_ssid *)(GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY, 
-                                   (struct acm_ssid_domain *)obj->ssid)))->ste_ssidref;
-        /* check whether subj and obj share a common ste type */
-       ret = have_common_type(ref_s, ref_o);
-       read_unlock(&acm_bin_pol_rwlock);
-       return ret;
+    ssidref_t ref_s, ref_o;
+    int ret;
+
+    if ((subj == NULL) || (obj == NULL) || (subj->ssid == NULL) || (obj->ssid == NULL))
+        return 0;
+    read_lock(&acm_bin_pol_rwlock);
+    /* lookup the policy-local ssids */
+    ref_s = ((struct ste_ssid *)(GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY, 
+                                           (struct acm_ssid_domain *)subj->ssid)))->ste_ssidref;
+    ref_o = ((struct ste_ssid *)(GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY, 
+                                           (struct acm_ssid_domain *)obj->ssid)))->ste_ssidref;
+    /* check whether subj and obj share a common ste type */
+    ret = have_common_type(ref_s, ref_o);
+    read_unlock(&acm_bin_pol_rwlock);
+    return ret;
 }
 
 /*
@@ -71,26 +72,26 @@
  */
 int acm_init_ste_policy(void)
 {
-       /* minimal startup policy; policy write-locked already */
-       ste_bin_pol.max_types = 1;
-       ste_bin_pol.max_ssidrefs = 2;
-       ste_bin_pol.ssidrefs = (domaintype_t *)xmalloc_array(domaintype_t, 2);
-       memset(ste_bin_pol.ssidrefs, 0, 2);
-
-       if (ste_bin_pol.ssidrefs == NULL)
-               return ACM_INIT_SSID_ERROR;
-
-       /* initialize state so that dom0 can start up and communicate with itself */
-       ste_bin_pol.ssidrefs[1] = 1;
-
-       /* init stats */
-       atomic_set(&(ste_bin_pol.ec_eval_count), 0);
-       atomic_set(&(ste_bin_pol.ec_denied_count), 0); 
-       atomic_set(&(ste_bin_pol.ec_cachehit_count), 0);
-       atomic_set(&(ste_bin_pol.gt_eval_count), 0);
-       atomic_set(&(ste_bin_pol.gt_denied_count), 0); 
-       atomic_set(&(ste_bin_pol.gt_cachehit_count), 0);
-       return ACM_OK;
+    /* minimal startup policy; policy write-locked already */
+    ste_bin_pol.max_types = 1;
+    ste_bin_pol.max_ssidrefs = 2;
+    ste_bin_pol.ssidrefs = (domaintype_t *)xmalloc_array(domaintype_t, 2);
+
+    if (ste_bin_pol.ssidrefs == NULL)
+        return ACM_INIT_SSID_ERROR;
+    memset(ste_bin_pol.ssidrefs, 0, 2 * sizeof(domaintype_t)); /* zero both entries, after the NULL check */
+
+    /* initialize state so that dom0 can start up and communicate with itself */
+    ste_bin_pol.ssidrefs[1] = 1;
+
+    /* init stats */
+    atomic_set(&(ste_bin_pol.ec_eval_count), 0);
+    atomic_set(&(ste_bin_pol.ec_denied_count), 0); 
+    atomic_set(&(ste_bin_pol.ec_cachehit_count), 0);
+    atomic_set(&(ste_bin_pol.gt_eval_count), 0);
+    atomic_set(&(ste_bin_pol.gt_denied_count), 0); 
+    atomic_set(&(ste_bin_pol.gt_cachehit_count), 0);
+    return ACM_OK;
 }
 
 
@@ -98,62 +99,68 @@
 static int
 ste_init_domain_ssid(void **ste_ssid, ssidref_t ssidref)
 {
-       int i;
-       struct ste_ssid *ste_ssidp = xmalloc(struct ste_ssid); 
-       traceprintk("%s.\n", __func__);
-
-       if (ste_ssidp == NULL)
-               return ACM_INIT_SSID_ERROR;
-
-       /* get policy-local ssid reference */
-       ste_ssidp->ste_ssidref = GET_SSIDREF(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY, ssidref);
-       if ((ste_ssidp->ste_ssidref >= ste_bin_pol.max_ssidrefs) ||
-           (ste_ssidp->ste_ssidref == ACM_DEFAULT_LOCAL_SSID)) {
-               printkd("%s: ERROR ste_ssidref (%x) undefined or unset (0).\n",
-                       __func__, ste_ssidp->ste_ssidref);
-               xfree(ste_ssidp);
-               return ACM_INIT_SSID_ERROR;
-       }
-       /* clean ste cache */
-       for (i=0; i<ACM_TE_CACHE_SIZE; i++)
-               ste_ssidp->ste_cache[i].valid = FREE;
-
-       (*ste_ssid) = ste_ssidp;
-       printkd("%s: determined ste_ssidref to %x.\n", 
-              __func__, ste_ssidp->ste_ssidref);
-       return ACM_OK;
+    int i;
+    struct ste_ssid *ste_ssidp = xmalloc(struct ste_ssid); 
+    traceprintk("%s.\n", __func__);
+
+    if (ste_ssidp == NULL)
+        return ACM_INIT_SSID_ERROR;
+
+    /* get policy-local ssid reference */
+    ste_ssidp->ste_ssidref = GET_SSIDREF(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY, ssidref);
+    if ((ste_ssidp->ste_ssidref >= ste_bin_pol.max_ssidrefs) ||
+        (ste_ssidp->ste_ssidref == ACM_DEFAULT_LOCAL_SSID)) {
+        printkd("%s: ERROR ste_ssidref (%x) undefined or unset (0).\n",
+                __func__, ste_ssidp->ste_ssidref);
+        xfree(ste_ssidp);
+        return ACM_INIT_SSID_ERROR;
+    }
+    /* clean ste cache */
+    for (i=0; i<ACM_TE_CACHE_SIZE; i++)
+        ste_ssidp->ste_cache[i].valid = FREE;
+
+    (*ste_ssid) = ste_ssidp;
+    printkd("%s: determined ste_ssidref to %x.\n", 
+            __func__, ste_ssidp->ste_ssidref);
+    return ACM_OK;
 }
 
 
 static void
 ste_free_domain_ssid(void *ste_ssid)
 {
-       traceprintk("%s.\n", __func__);
-       if (ste_ssid != NULL)
-               xfree(ste_ssid);
-       return;
+    traceprintk("%s.\n", __func__);
+    if (ste_ssid != NULL)
+        xfree(ste_ssid);
+    return;
 }
 
 /* dump type enforcement cache; policy read-locked already */
 static int 
-ste_dump_policy(u8 *buf, u16 buf_size) {
-     struct acm_ste_policy_buffer *ste_buf = (struct acm_ste_policy_buffer *)buf;
-     int ret = 0;
-
-     ste_buf->ste_max_types = htonl(ste_bin_pol.max_types);
-     ste_buf->ste_max_ssidrefs = htonl(ste_bin_pol.max_ssidrefs);
-     ste_buf->policy_code = htonl(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY);
-     ste_buf->ste_ssid_offset = htonl(sizeof(struct acm_ste_policy_buffer));
-     ret = ntohl(ste_buf->ste_ssid_offset) +
-            sizeof(domaintype_t)*ste_bin_pol.max_ssidrefs*ste_bin_pol.max_types;
-
-     /* now copy buffer over */
-     arrcpy(buf + ntohl(ste_buf->ste_ssid_offset),
-           ste_bin_pol.ssidrefs,
-           sizeof(domaintype_t),
-             ste_bin_pol.max_ssidrefs*ste_bin_pol.max_types);
-
-     return ret;
+ste_dump_policy(u8 *buf, u32 buf_size) {
+    struct acm_ste_policy_buffer *ste_buf = (struct acm_ste_policy_buffer *)buf;
+    int ret = 0;
+
+    if (buf_size < sizeof(struct acm_ste_policy_buffer))
+        return -EINVAL;
+
+    ste_buf->ste_max_types = htonl(ste_bin_pol.max_types);
+    ste_buf->ste_max_ssidrefs = htonl(ste_bin_pol.max_ssidrefs);
+    ste_buf->policy_code = htonl(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY);
+    ste_buf->ste_ssid_offset = htonl(sizeof(struct acm_ste_policy_buffer));
+    ret = ntohl(ste_buf->ste_ssid_offset) +
+        sizeof(domaintype_t)*ste_bin_pol.max_ssidrefs*ste_bin_pol.max_types;
+
+    if (buf_size < ret)
+        return -EINVAL;
+
+    /* now copy buffer over */
+    arrcpy(buf + ntohl(ste_buf->ste_ssid_offset),
+           ste_bin_pol.ssidrefs,
+           sizeof(domaintype_t),
+           ste_bin_pol.max_ssidrefs*ste_bin_pol.max_types);
+
+    return ret;
 }
 
 /* ste_init_state is called when a policy is changed to detect violations (return != 0).
@@ -176,83 +183,83 @@
     /* go through all domains and adjust policy as if this domain was started now */
     pd = &domain_list;
     for ( pd = &domain_list; *pd != NULL; pd = &(*pd)->next_in_list ) {
-           ste_ssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY, 
-                                (struct acm_ssid_domain *)(*pd)->ssid);
-           ste_ssidref = ste_ssid->ste_ssidref;
-           traceprintk("%s: validating policy for eventch domain %x (ste-Ref=%x).\n",
-                   __func__, (*pd)->domain_id, ste_ssidref);
-           /* a) check for event channel conflicts */
-           for (port=0; port < NR_EVTCHN_BUCKETS; port++) {
-                   spin_lock(&(*pd)->evtchn_lock);
-                   if ((*pd)->evtchn[port] == NULL) {
-                            spin_unlock(&(*pd)->evtchn_lock);
-                           continue;
-                   }
-                   if ((*pd)->evtchn[port]->state == ECS_INTERDOMAIN) {
-                           rdom = (*pd)->evtchn[port]->u.interdomain.remote_dom;
-                           rdomid = rdom->domain_id;
-                           /* rdom now has remote domain */
-                           ste_rssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY, 
-                                                 (struct acm_ssid_domain *)(rdom->ssid));
-                           ste_rssidref = ste_rssid->ste_ssidref;
-                   } else if ((*pd)->evtchn[port]->state == ECS_UNBOUND) {
-                           rdomid = (*pd)->evtchn[port]->u.unbound.remote_domid;
-                           if ((rdom = find_domain_by_id(rdomid)) == NULL) {
-                                   printk("%s: Error finding domain to id %x!\n", __func__, rdomid);
-                                   goto out;
-                           }
-                           /* rdom now has remote domain */
-                           ste_rssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY, 
-                                                 (struct acm_ssid_domain *)(rdom->ssid));
-                           ste_rssidref = ste_rssid->ste_ssidref;
-                           put_domain(rdom);
-                   } else {
-                           spin_unlock(&(*pd)->evtchn_lock);
-                           continue; /* port unused */
-                   }
-                   spin_unlock(&(*pd)->evtchn_lock);
-
-                   /* rdom now has remote domain */
-                   ste_rssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY, 
-                                            (struct acm_ssid_domain *)(rdom->ssid));
-                   ste_rssidref = ste_rssid->ste_ssidref;
-                   traceprintk("%s: eventch: domain %x (ssidref %x) --> domain %x (rssidref %x) used (port %x).\n", 
-                           __func__, (*pd)->domain_id, ste_ssidref, rdom->domain_id, ste_rssidref, port);
-                   /* check whether on subj->ssid, obj->ssid share a common type*/
-                   if (!have_common_type(ste_ssidref, ste_rssidref)) {
-                           printkd("%s: Policy violation in event channel domain %x -> domain %x.\n",
-                                   __func__, (*pd)->domain_id, rdomid);
-                           goto out;
-                   }
-           }   
-           /* b) check for grant table conflicts on shared pages */
-           if ((*pd)->grant_table->shared == NULL) {
-                   printkd("%s: Grant ... sharing for domain %x not setup!\n", __func__, (*pd)->domain_id);
-                   continue;
-           }
-           for ( i = 0; i < NR_GRANT_ENTRIES; i++ ) {
-                   sha_copy =  (*pd)->grant_table->shared[i];
-                   if ( sha_copy.flags ) {
-                           printkd("%s: grant dom (%hu) SHARED (%d) flags:(%hx) dom:(%hu) frame:(%lx)\n",
-                                   __func__, (*pd)->domain_id, i, sha_copy.flags, sha_copy.domid, 
-                                   (unsigned long)sha_copy.frame);
-                           rdomid = sha_copy.domid;
-                           if ((rdom = find_domain_by_id(rdomid)) == NULL) {
-                                   printkd("%s: domain not found ERROR!\n", __func__);
-                                   goto out;
-                           };
-                           /* rdom now has remote domain */
-                           ste_rssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY, 
-                                                 (struct acm_ssid_domain *)(rdom->ssid));
-                           ste_rssidref = ste_rssid->ste_ssidref;
-                           put_domain(rdom);
-                           if (!have_common_type(ste_ssidref, ste_rssidref)) {
-                                   printkd("%s: Policy violation in grant table sharing domain %x -> domain %x.\n",
-                                           __func__, (*pd)->domain_id, rdomid);
-                                   goto out;
-                           }
-                   }
-           }
+        ste_ssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY, 
+                             (struct acm_ssid_domain *)(*pd)->ssid);
+        ste_ssidref = ste_ssid->ste_ssidref;
+        traceprintk("%s: validating policy for eventch domain %x (ste-Ref=%x).\n",
+                    __func__, (*pd)->domain_id, ste_ssidref);
+        /* a) check for event channel conflicts */
+        for (port=0; port < NR_EVTCHN_BUCKETS; port++) {
+            spin_lock(&(*pd)->evtchn_lock);
+            if ((*pd)->evtchn[port] == NULL) {
+                spin_unlock(&(*pd)->evtchn_lock);
+                continue;
+            }
+            if ((*pd)->evtchn[port]->state == ECS_INTERDOMAIN) {
+                rdom = (*pd)->evtchn[port]->u.interdomain.remote_dom;
+                rdomid = rdom->domain_id;
+                /* rdom now has remote domain */
+                ste_rssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY, 
+                                      (struct acm_ssid_domain *)(rdom->ssid));
+                ste_rssidref = ste_rssid->ste_ssidref;
+            } else if ((*pd)->evtchn[port]->state == ECS_UNBOUND) {
+                rdomid = (*pd)->evtchn[port]->u.unbound.remote_domid;
+                if ((rdom = find_domain_by_id(rdomid)) == NULL) {
+                    printk("%s: Error finding domain to id %x!\n", __func__, rdomid);
+                    goto out;
+                }
+                /* rdom now has remote domain */
+                ste_rssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY, 
+                                      (struct acm_ssid_domain *)(rdom->ssid));
+                ste_rssidref = ste_rssid->ste_ssidref;
+                put_domain(rdom);
+            } else {
+                spin_unlock(&(*pd)->evtchn_lock);
+                continue; /* port unused */
+            }
+            spin_unlock(&(*pd)->evtchn_lock);
+
+            /* rdom now has remote domain */
+            ste_rssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY, 
+                                  (struct acm_ssid_domain *)(rdom->ssid));
+            ste_rssidref = ste_rssid->ste_ssidref;
+            traceprintk("%s: eventch: domain %x (ssidref %x) --> domain %x (rssidref %x) used (port %x).\n", 
+                        __func__, (*pd)->domain_id, ste_ssidref, rdom->domain_id, ste_rssidref, port);
+            /* check whether on subj->ssid, obj->ssid share a common type*/
+            if (!have_common_type(ste_ssidref, ste_rssidref)) {
+                printkd("%s: Policy violation in event channel domain %x -> domain %x.\n",
+                        __func__, (*pd)->domain_id, rdomid);
+                goto out;
+            }
+        } 
+        /* b) check for grant table conflicts on shared pages */
+        if ((*pd)->grant_table->shared == NULL) {
+            printkd("%s: Grant ... sharing for domain %x not setup!\n", __func__, (*pd)->domain_id);
+            continue;
+        }
+        for ( i = 0; i < NR_GRANT_ENTRIES; i++ ) {
+            sha_copy =  (*pd)->grant_table->shared[i];
+            if ( sha_copy.flags ) {
+                printkd("%s: grant dom (%hu) SHARED (%d) flags:(%hx) dom:(%hu) frame:(%lx)\n",
+                        __func__, (*pd)->domain_id, i, sha_copy.flags, sha_copy.domid, 
+                        (unsigned long)sha_copy.frame);
+                rdomid = sha_copy.domid;
+                if ((rdom = find_domain_by_id(rdomid)) == NULL) {
+                    printkd("%s: domain not found ERROR!\n", __func__);
+                    goto out;
+                };
+                /* rdom now has remote domain */
+                ste_rssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY, 
+                                      (struct acm_ssid_domain *)(rdom->ssid));
+                ste_rssidref = ste_rssid->ste_ssidref;
+                put_domain(rdom);
+                if (!have_common_type(ste_ssidref, ste_rssidref)) {
+                    printkd("%s: Policy violation in grant table sharing domain %x -> domain %x.\n",
+                            __func__, (*pd)->domain_id, rdomid);
+                    goto out;
+                }
+            }
+        }
     }
     violation = 0;
  out:
@@ -267,110 +274,78 @@
 
 /* set new policy; policy write-locked already */
 static int
-ste_set_policy(u8 *buf, u16 buf_size) 
-{
-     struct acm_ste_policy_buffer *ste_buf = (struct acm_ste_policy_buffer *)buf;
-     void *ssidrefsbuf;
-     struct ste_ssid *ste_ssid;
-     struct domain **pd;
-     int i;
-
-     /* Convert endianess of policy */
-     ste_buf->policy_code = ntohl(ste_buf->policy_code);
-     ste_buf->policy_version = ntohl(ste_buf->policy_version);
-     ste_buf->ste_max_types = ntohl(ste_buf->ste_max_types);
-     ste_buf->ste_max_ssidrefs = ntohl(ste_buf->ste_max_ssidrefs);
-     ste_buf->ste_ssid_offset = ntohl(ste_buf->ste_ssid_offset);
-
-     /* policy type and version checks */
-     if ((ste_buf->policy_code != ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY) ||
-        (ste_buf->policy_version != ACM_STE_VERSION))
-            return -EINVAL;
-
-     /* 1. create and copy-in new ssidrefs buffer */
-     ssidrefsbuf = xmalloc_array(u8, sizeof(domaintype_t)*ste_buf->ste_max_types*ste_buf->ste_max_ssidrefs);
-     if (ssidrefsbuf == NULL) {
-            return -ENOMEM;
-     }
-     if (ste_buf->ste_ssid_offset + sizeof(domaintype_t) * ste_buf->ste_max_ssidrefs*ste_buf->ste_max_types > buf_size)
-         goto error_free;
-
-     arrcpy(ssidrefsbuf, 
-            buf + ste_buf->ste_ssid_offset,
-            sizeof(domaintype_t),
-           ste_buf->ste_max_ssidrefs*ste_buf->ste_max_types);
-
-     /* 2. now re-calculate sharing decisions based on running domains; 
-      *    this can fail if new policy is conflicting with sharing of running domains 
-      *    now: reject violating new policy; future: adjust sharing through revoking sharing */
-     if (ste_init_state(ste_buf, (domaintype_t *)ssidrefsbuf)) {
-            printk("%s: New policy conflicts with running domains. Policy load aborted.\n", __func__);
-            goto error_free; /* new policy conflicts with sharing of running domains */
-     }
-     /* 3. replace old policy (activate new policy) */
-     ste_bin_pol.max_types = ste_buf->ste_max_types;
-     ste_bin_pol.max_ssidrefs = ste_buf->ste_max_ssidrefs;
-     if (ste_bin_pol.ssidrefs) 
-            xfree(ste_bin_pol.ssidrefs);
-     ste_bin_pol.ssidrefs = (domaintype_t *)ssidrefsbuf;
-
-     /* clear all ste caches */
-     read_lock(&domlist_lock);
-     pd = &domain_list;
-     for ( pd = &domain_list; *pd != NULL; pd = &(*pd)->next_in_list ) {
-        ste_ssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY, 
-                        (struct acm_ssid_domain *)(*pd)->ssid);
-        for (i=0; i<ACM_TE_CACHE_SIZE; i++)
-               ste_ssid->ste_cache[i].valid = FREE;
-     }
-     read_unlock(&domlist_lock);
-     return ACM_OK;
-
-error_free:
-       printk("%s: ERROR setting policy.\n", __func__);
-       if (ssidrefsbuf != NULL) xfree(ssidrefsbuf);
-       return -EFAULT;
+ste_set_policy(u8 *buf, u32 buf_size)
+{
+    struct acm_ste_policy_buffer *ste_buf = (struct acm_ste_policy_buffer *)buf;
+    void *ssidrefsbuf;
+    struct ste_ssid *ste_ssid;
+    struct domain **pd;
+    int i;
+
+    if (buf_size < sizeof(struct acm_ste_policy_buffer))
+        return -EINVAL;
+
+    /* Convert endianess of policy */
+    ste_buf->policy_code = ntohl(ste_buf->policy_code);
+    ste_buf->policy_version = ntohl(ste_buf->policy_version);
+    ste_buf->ste_max_types = ntohl(ste_buf->ste_max_types);
+    ste_buf->ste_max_ssidrefs = ntohl(ste_buf->ste_max_ssidrefs);
+    ste_buf->ste_ssid_offset = ntohl(ste_buf->ste_ssid_offset);
+
+    /* policy type and version checks */
+    if ((ste_buf->policy_code != ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY) ||
+        (ste_buf->policy_version != ACM_STE_VERSION))
+        return -EINVAL;
+
+    /* 1. create and copy-in new ssidrefs buffer */
+    ssidrefsbuf = xmalloc_array(u8, sizeof(domaintype_t)*ste_buf->ste_max_types*ste_buf->ste_max_ssidrefs);
+    if (ssidrefsbuf == NULL) {
+        return -ENOMEM;
+    }
+    if (ste_buf->ste_ssid_offset + sizeof(domaintype_t) * ste_buf->ste_max_ssidrefs*ste_buf->ste_max_types > buf_size)
+        goto error_free;
+
+    arrcpy(ssidrefsbuf, 
+           buf + ste_buf->ste_ssid_offset,
+           sizeof(domaintype_t),
+           ste_buf->ste_max_ssidrefs*ste_buf->ste_max_types);
+
+    /* 2. now re-calculate sharing decisions based on running domains; 
+     *    this can fail if new policy is conflicting with sharing of running domains 
+     *    now: reject violating new policy; future: adjust sharing through revoking sharing */
+    if (ste_init_state(ste_buf, (domaintype_t *)ssidrefsbuf)) {
+        printk("%s: New policy conflicts with running domains. Policy load aborted.\n", __func__);
+        goto error_free; /* new policy conflicts with sharing of running domains */
+    }
+    /* 3. replace old policy (activate new policy) */
+    ste_bin_pol.max_types = ste_buf->ste_max_types;
+    ste_bin_pol.max_ssidrefs = ste_buf->ste_max_ssidrefs;
+    if (ste_bin_pol.ssidrefs) 
+        xfree(ste_bin_pol.ssidrefs);
+    ste_bin_pol.ssidrefs = (domaintype_t *)ssidrefsbuf;
+
+    /* clear all ste caches */
+    read_lock(&domlist_lock);
+    pd = &domain_list;
+    for ( pd = &domain_list; *pd != NULL; pd = &(*pd)->next_in_list ) {
+        ste_ssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY, 
+                             (struct acm_ssid_domain *)(*pd)->ssid);
+        for (i=0; i<ACM_TE_CACHE_SIZE; i++)
+            ste_ssid->ste_cache[i].valid = FREE;
+    }
+    read_unlock(&domlist_lock);
+    return ACM_OK;
+
+ error_free:
+    printk("%s: ERROR setting policy.\n", __func__);
+    if (ssidrefsbuf != NULL) xfree(ssidrefsbuf);
+    return -EFAULT;
 }
 
 static int 
 ste_dump_stats(u8 *buf, u16 buf_len)
 {
     struct acm_ste_stats_buffer stats;
-
-#ifdef ACM_DEBUG
-    int i;
-    struct ste_ssid *ste_ssid;
-    struct domain **pd;
-
-    printk("ste: Decision caches:\n");
-    /* go through all domains and adjust policy as if this domain was started now */
-    read_lock(&domlist_lock); /* go by domain? or directly by global? event/grant list */
-    pd = &domain_list;
-    for ( pd = &domain_list; *pd != NULL; pd = &(*pd)->next_in_list ) {
-        printk("ste: Cache Domain %02x.\n", (*pd)->domain_id);
-       ste_ssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY, 
-                        (struct acm_ssid_domain *)(*pd)->ssid);
-       for (i=0; i<ACM_TE_CACHE_SIZE; i++)
-               printk("\t\tcache[%02x] = %s, domid=%x.\n", i,
-                      (ste_ssid->ste_cache[i].valid == VALID) ? 
-                      "VALID" : "FREE",
-                      (ste_ssid->ste_cache[i].valid == VALID) ? 
-                      ste_ssid->ste_cache[i].id : 0xffffffff);
-    }
-    read_unlock(&domlist_lock);
-    /* init stats */
-    printk("STE-Policy Security Hook Statistics:\n");
-    printk("ste: event_channel eval_count      = %x\n", 
atomic_read(&(ste_bin_pol.ec_eval_count)));
-    printk("ste: event_channel denied_count    = %x\n", 
atomic_read(&(ste_bin_pol.ec_denied_count))); 
-    printk("ste: event_channel cache_hit_count = %x\n", 
atomic_read(&(ste_bin_pol.ec_cachehit_count)));
-    printk("ste:\n");
-    printk("ste: grant_table   eval_count      = %x\n", 
atomic_read(&(ste_bin_pol.gt_eval_count)));
-    printk("ste: grant_table   denied_count    = %x\n", 
atomic_read(&(ste_bin_pol.gt_denied_count))); 
-    printk("ste: grant_table   cache_hit_count = %x\n", 
atomic_read(&(ste_bin_pol.gt_cachehit_count)));
-#endif
-
-    if (buf_len < sizeof(struct acm_ste_stats_buffer))
-           return -ENOMEM;
 
     /* now send the hook counts to user space */
     stats.ec_eval_count = htonl(atomic_read(&ste_bin_pol.ec_eval_count));
@@ -379,6 +354,10 @@
     stats.gt_denied_count = htonl(atomic_read(&ste_bin_pol.gt_denied_count)); 
     stats.ec_cachehit_count = htonl(atomic_read(&ste_bin_pol.ec_cachehit_count));
     stats.gt_cachehit_count = htonl(atomic_read(&ste_bin_pol.gt_cachehit_count));
+
+    if (buf_len < sizeof(struct acm_ste_stats_buffer))
+        return -ENOMEM;
+
     memcpy(buf, &stats, sizeof(struct acm_ste_stats_buffer));
     return sizeof(struct acm_ste_stats_buffer);
 }
@@ -392,12 +371,12 @@
     if (ste_bin_pol.max_types > len)
         return -EFAULT;
 
-       if (ssidref >= ste_bin_pol.max_ssidrefs)
-               return -EFAULT;
+    if (ssidref >= ste_bin_pol.max_ssidrefs)
+        return -EFAULT;
 
     /* read types for chwall ssidref */
     for(i=0; i< ste_bin_pol.max_types; i++) {
-               if (ste_bin_pol.ssidrefs[ssidref * ste_bin_pol.max_types + i])
+        if (ste_bin_pol.ssidrefs[ssidref * ste_bin_pol.max_types + i])
             buf[i] = 1;
         else
             buf[i] = 0;
@@ -409,40 +388,40 @@
  * returns 1 == cache hit */
 static int inline
 check_cache(struct domain *dom, domid_t rdom) {
-       struct ste_ssid *ste_ssid;
-       int i;
-
-       printkd("checking cache: %x --> %x.\n", dom->domain_id, rdom);
-       ste_ssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY, 
-                        (struct acm_ssid_domain *)(dom)->ssid);
-
-       for(i=0; i< ACM_TE_CACHE_SIZE; i++) {
-               if ((ste_ssid->ste_cache[i].valid == VALID) &&
-                   (ste_ssid->ste_cache[i].id == rdom)) {
-                       printkd("cache hit (entry %x, id= %x!\n", i, ste_ssid->ste_cache[i].id);
-                       return 1;
-               }
-       }
-       return 0;
+    struct ste_ssid *ste_ssid;
+    int i;
+
+    printkd("checking cache: %x --> %x.\n", dom->domain_id, rdom);
+    ste_ssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY, 
+                         (struct acm_ssid_domain *)(dom)->ssid);
+
+    for(i=0; i< ACM_TE_CACHE_SIZE; i++) {
+        if ((ste_ssid->ste_cache[i].valid == VALID) &&
+            (ste_ssid->ste_cache[i].id == rdom)) {
+            printkd("cache hit (entry %x, id= %x!\n", i, 
ste_ssid->ste_cache[i].id);
+            return 1;
+        }
+    }
+    return 0;
 }
 
 
 /* we only get here if there is NO entry yet; no duplication check! */
 static void inline
 cache_result(struct domain *subj, struct domain *obj) {
-       struct ste_ssid *ste_ssid;
-       int i;
-       printkd("caching from doms: %x --> %x.\n", subj->domain_id, 
obj->domain_id);
-       ste_ssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY, 
-                        (struct acm_ssid_domain *)(subj)->ssid);
-       for(i=0; i< ACM_TE_CACHE_SIZE; i++)
-               if (ste_ssid->ste_cache[i].valid == FREE)
-                       break;
-       if (i< ACM_TE_CACHE_SIZE) {
-               ste_ssid->ste_cache[i].valid = VALID;
-               ste_ssid->ste_cache[i].id = obj->domain_id;
-       } else
-               printk ("Cache of dom %x is full!\n", subj->domain_id);
+    struct ste_ssid *ste_ssid;
+    int i;
+    printkd("caching from doms: %x --> %x.\n", subj->domain_id, 
obj->domain_id);
+    ste_ssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY, 
+                         (struct acm_ssid_domain *)(subj)->ssid);
+    for(i=0; i< ACM_TE_CACHE_SIZE; i++)
+        if (ste_ssid->ste_cache[i].valid == FREE)
+            break;
+    if (i< ACM_TE_CACHE_SIZE) {
+        ste_ssid->ste_cache[i].valid = VALID;
+        ste_ssid->ste_cache[i].id = obj->domain_id;
+    } else
+        printk ("Cache of dom %x is full!\n", subj->domain_id);
 }
 
 /* deletes entries for domain 'id' from all caches (re-use) */
@@ -458,12 +437,12 @@
     read_lock(&domlist_lock); /* look through caches of all domains */
     pd = &domain_list;
     for ( pd = &domain_list; *pd != NULL; pd = &(*pd)->next_in_list ) {
-       ste_ssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY, 
-                        (struct acm_ssid_domain *)(*pd)->ssid);
-       for (i=0; i<ACM_TE_CACHE_SIZE; i++)
-           if ((ste_ssid->ste_cache[i].valid == VALID) &&
-               (ste_ssid->ste_cache[i].id = id))
-                   ste_ssid->ste_cache[i].valid = FREE;
+        ste_ssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY, 
+                             (struct acm_ssid_domain *)(*pd)->ssid);
+        for (i=0; i<ACM_TE_CACHE_SIZE; i++)
+            if ((ste_ssid->ste_cache[i].valid == VALID) &&
+                (ste_ssid->ste_cache[i].id == id))
+                ste_ssid->ste_cache[i].valid = FREE;
     }
     read_unlock(&domlist_lock);
 }
@@ -482,15 +461,15 @@
     read_lock(&acm_bin_pol_rwlock);
     ste_ssidref = GET_SSIDREF(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY, ssidref);
     if (ste_ssidref == ACM_DEFAULT_LOCAL_SSID) {
-       printk("%s: ERROR STE SSID is NOT SET but policy enforced.\n", 
__func__);
-       read_unlock(&acm_bin_pol_rwlock);
-       return ACM_ACCESS_DENIED; /* catching and indicating config error */
+        printk("%s: ERROR STE SSID is NOT SET but policy enforced.\n", 
__func__);
+        read_unlock(&acm_bin_pol_rwlock);
+        return ACM_ACCESS_DENIED; /* catching and indicating config error */
     }
     if (ste_ssidref >= ste_bin_pol.max_ssidrefs) {
-       printk("%s: ERROR ste_ssidref > max(%x).\n", 
-              __func__, ste_bin_pol.max_ssidrefs-1);
-       read_unlock(&acm_bin_pol_rwlock);
-       return ACM_ACCESS_DENIED;
+        printk("%s: ERROR ste_ssidref > max(%x).\n", 
+               __func__, ste_bin_pol.max_ssidrefs-1);
+        read_unlock(&acm_bin_pol_rwlock);
+        return ACM_ACCESS_DENIED;
     }
     read_unlock(&acm_bin_pol_rwlock);
     return ACM_ACCESS_PERMITTED;
@@ -506,163 +485,193 @@
 /* -------- EVENTCHANNEL OPERATIONS -----------*/
 static int
 ste_pre_eventchannel_unbound(domid_t id) {
-       struct domain *subj, *obj;
-       int ret;
-       traceprintk("%s: dom%x-->dom%x.\n", 
-                   __func__, current->domain->domain_id, id);
-
-       if (check_cache(current->domain, id)) {
-               atomic_inc(&ste_bin_pol.ec_cachehit_count);
-               return ACM_ACCESS_PERMITTED;
-       }
-       atomic_inc(&ste_bin_pol.ec_eval_count);
-       subj = current->domain;
-       obj = find_domain_by_id(id);
-
-       if (share_common_type(subj, obj)) {
-               cache_result(subj, obj);
-               ret = ACM_ACCESS_PERMITTED;
-       } else {
-               atomic_inc(&ste_bin_pol.ec_denied_count); 
-               ret = ACM_ACCESS_DENIED;        
-       }
-       if (obj != NULL)
-               put_domain(obj);
-       return ret;
+    struct domain *subj, *obj;
+    int ret;
+    traceprintk("%s: dom%x-->dom%x.\n", 
+                __func__, current->domain->domain_id, id);
+
+    if (check_cache(current->domain, id)) {
+        atomic_inc(&ste_bin_pol.ec_cachehit_count);
+        return ACM_ACCESS_PERMITTED;
+    }
+    atomic_inc(&ste_bin_pol.ec_eval_count);
+    subj = current->domain;
+    obj = find_domain_by_id(id);
+
+    if (share_common_type(subj, obj)) {
+        cache_result(subj, obj);
+        ret = ACM_ACCESS_PERMITTED;
+    } else {
+        atomic_inc(&ste_bin_pol.ec_denied_count); 
+        ret = ACM_ACCESS_DENIED; 
+    }
+    if (obj != NULL)
+        put_domain(obj);
+    return ret;
 }
 
 static int
 ste_pre_eventchannel_interdomain(domid_t id1, domid_t id2)
 {
-       struct domain *subj, *obj;
-       int ret;
-       traceprintk("%s: dom%x-->dom%x.\n", __func__,
-                   (id1 == DOMID_SELF) ? current->domain->domain_id : id1,
-                   (id2 == DOMID_SELF) ? current->domain->domain_id : id2);
-
-       /* following is a bit longer but ensures that we
-         * "put" only domains that we where "find"-ing 
-        */
-       if (id1 == DOMID_SELF) id1 = current->domain->domain_id;
-       if (id2 == DOMID_SELF) id2 = current->domain->domain_id;
-
-       subj = find_domain_by_id(id1);
-       obj  = find_domain_by_id(id2);
-       if ((subj == NULL) || (obj == NULL)) {
-               ret = ACM_ACCESS_DENIED;
-               goto out;
-       }
-       /* cache check late, but evtchn is not on performance critical path */
-       if (check_cache(subj, obj->domain_id)) {
-               atomic_inc(&ste_bin_pol.ec_cachehit_count);
-               ret = ACM_ACCESS_PERMITTED;
-               goto out;
-       }
-       atomic_inc(&ste_bin_pol.ec_eval_count);
-
-       if (share_common_type(subj, obj)) {
-               cache_result(subj, obj);
-               ret = ACM_ACCESS_PERMITTED;
-       } else {
-               atomic_inc(&ste_bin_pol.ec_denied_count); 
-               ret = ACM_ACCESS_DENIED;        
-       }
+    struct domain *subj, *obj;
+    int ret;
+    traceprintk("%s: dom%x-->dom%x.\n", __func__,
+                (id1 == DOMID_SELF) ? current->domain->domain_id : id1,
+                (id2 == DOMID_SELF) ? current->domain->domain_id : id2);
+
+    /* following is a bit longer but ensures that we
+     * "put" only domains that we where "find"-ing 
+     */
+    if (id1 == DOMID_SELF) id1 = current->domain->domain_id;
+    if (id2 == DOMID_SELF) id2 = current->domain->domain_id;
+
+    subj = find_domain_by_id(id1);
+    obj  = find_domain_by_id(id2);
+    if ((subj == NULL) || (obj == NULL)) {
+        ret = ACM_ACCESS_DENIED;
+        goto out;
+    }
+    /* cache check late, but evtchn is not on performance critical path */
+    if (check_cache(subj, obj->domain_id)) {
+        atomic_inc(&ste_bin_pol.ec_cachehit_count);
+        ret = ACM_ACCESS_PERMITTED;
+        goto out;
+    }
+    atomic_inc(&ste_bin_pol.ec_eval_count);
+
+    if (share_common_type(subj, obj)) {
+        cache_result(subj, obj);
+        ret = ACM_ACCESS_PERMITTED;
+    } else {
+        atomic_inc(&ste_bin_pol.ec_denied_count); 
+        ret = ACM_ACCESS_DENIED; 
+    }
  out:
-       if (obj != NULL)
-               put_domain(obj);
-       if (subj != NULL)
-               put_domain(subj);
-       return ret;
+    if (obj != NULL)
+        put_domain(obj);
+    if (subj != NULL)
+        put_domain(subj);
+    return ret;
 }
 
 /* -------- SHARED MEMORY OPERATIONS -----------*/
 
 static int
 ste_pre_grant_map_ref (domid_t id) {
-       struct domain *obj, *subj;
-       int ret;
-       traceprintk("%s: dom%x-->dom%x.\n", __func__,
-                   current->domain->domain_id, id);
-
-       if (check_cache(current->domain, id)) {
-               atomic_inc(&ste_bin_pol.gt_cachehit_count);
-               return ACM_ACCESS_PERMITTED;
-       }
-       atomic_inc(&ste_bin_pol.gt_eval_count);
-       subj = current->domain;
-       obj = find_domain_by_id(id);
-
-       if (share_common_type(subj, obj)) {
-               cache_result(subj, obj);
-               ret = ACM_ACCESS_PERMITTED;
-       } else {
-               atomic_inc(&ste_bin_pol.gt_denied_count); 
-               printkd("%s: ACCESS DENIED!\n", __func__);
-               ret = ACM_ACCESS_DENIED;        
-       }
-       if (obj != NULL)
-               put_domain(obj);
-       return ret;
-}
+    struct domain *obj, *subj;
+    int ret;
+    traceprintk("%s: dom%x-->dom%x.\n", __func__,
+                current->domain->domain_id, id);
+
+    if (check_cache(current->domain, id)) {
+        atomic_inc(&ste_bin_pol.gt_cachehit_count);
+        return ACM_ACCESS_PERMITTED;
+    }
+    atomic_inc(&ste_bin_pol.gt_eval_count);
+    subj = current->domain;
+    obj = find_domain_by_id(id);
+
+    if (share_common_type(subj, obj)) {
+        cache_result(subj, obj);
+        ret = ACM_ACCESS_PERMITTED;
+    } else {
+        atomic_inc(&ste_bin_pol.gt_denied_count); 
+        printkd("%s: ACCESS DENIED!\n", __func__);
+        ret = ACM_ACCESS_DENIED; 
+    }
+    if (obj != NULL)
+        put_domain(obj);
+    return ret;
+}
+
 
 /* since setting up grant tables involves some implicit information
    flow from the creating domain to the domain that is setup, we 
    check types in addition to the general authorization */
 static int
 ste_pre_grant_setup (domid_t id) {
-       struct domain *obj, *subj;
-       int ret;
-       traceprintk("%s: dom%x-->dom%x.\n", __func__,
-                   current->domain->domain_id, id);
-
-       if (check_cache(current->domain, id)) {
-               atomic_inc(&ste_bin_pol.gt_cachehit_count);
-               return ACM_ACCESS_PERMITTED;
-       }
-       atomic_inc(&ste_bin_pol.gt_eval_count);
-       /* a) check authorization (eventually use specific capabilities) */
-       if (!IS_PRIV(current->domain)) {
-               printk("%s: Grant table management authorization denied 
ERROR!\n", __func__);
-               return ACM_ACCESS_DENIED;
-       }
-       /* b) check types */
-       subj = current->domain;
-       obj = find_domain_by_id(id);
-
-       if (share_common_type(subj, obj)) {
-               cache_result(subj, obj);
-               ret = ACM_ACCESS_PERMITTED;
-       } else {
-               atomic_inc(&ste_bin_pol.gt_denied_count); 
-               ret = ACM_ACCESS_DENIED;        
-       }
-       if (obj != NULL)
-               put_domain(obj);
-       return ret;
-}
+    struct domain *obj, *subj;
+    int ret;
+    traceprintk("%s: dom%x-->dom%x.\n", __func__,
+                current->domain->domain_id, id);
+
+    if (check_cache(current->domain, id)) {
+        atomic_inc(&ste_bin_pol.gt_cachehit_count);
+        return ACM_ACCESS_PERMITTED;
+    }
+    atomic_inc(&ste_bin_pol.gt_eval_count);
+    /* a) check authorization (eventually use specific capabilities) */
+    if (!IS_PRIV(current->domain)) {
+        printk("%s: Grant table management authorization denied ERROR!\n", 
__func__);
+        return ACM_ACCESS_DENIED;
+    }
+    /* b) check types */
+    subj = current->domain;
+    obj = find_domain_by_id(id);
+
+    if (share_common_type(subj, obj)) {
+        cache_result(subj, obj);
+        ret = ACM_ACCESS_PERMITTED;
+    } else {
+        atomic_inc(&ste_bin_pol.gt_denied_count); 
+        ret = ACM_ACCESS_DENIED; 
+    }
+    if (obj != NULL)
+        put_domain(obj);
+    return ret;
+}
+
+/* -------- DOMAIN-Requested Decision hooks -----------*/
+
+static int
+ste_sharing(ssidref_t ssidref1, ssidref_t ssidref2) {
+    if (have_common_type (
+        GET_SSIDREF(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY, ssidref1),
+        GET_SSIDREF(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY, ssidref2)
+        ))
+        return ACM_ACCESS_PERMITTED;
+    else
+        return ACM_ACCESS_DENIED;
+}
+
 
 /* now define the hook structure similarly to LSM */
 struct acm_operations acm_simple_type_enforcement_ops = {
-       /* policy management services */
-       .init_domain_ssid               = ste_init_domain_ssid,
-       .free_domain_ssid               = ste_free_domain_ssid,
-       .dump_binary_policy     = ste_dump_policy,
-       .set_binary_policy      = ste_set_policy,
-       .dump_statistics                = ste_dump_stats,
+
+    /* policy management services */
+    .init_domain_ssid       = ste_init_domain_ssid,
+    .free_domain_ssid       = ste_free_domain_ssid,
+    .dump_binary_policy     = ste_dump_policy,
+    .set_binary_policy      = ste_set_policy,
+    .dump_statistics        = ste_dump_stats,
     .dump_ssid_types        = ste_dump_ssid_types,
-       /* domain management control hooks */
-       .pre_domain_create              = ste_pre_domain_create,
-       .post_domain_create         = NULL,
-       .fail_domain_create     = NULL,
-       .post_domain_destroy    = ste_post_domain_destroy,
-       /* event channel control hooks */
-       .pre_eventchannel_unbound   = ste_pre_eventchannel_unbound,
-       .fail_eventchannel_unbound      = NULL,
-       .pre_eventchannel_interdomain   = ste_pre_eventchannel_interdomain,
-       .fail_eventchannel_interdomain  = NULL,
-       /* grant table control hooks */
-       .pre_grant_map_ref      = ste_pre_grant_map_ref,
-       .fail_grant_map_ref     = NULL,
-       .pre_grant_setup        = ste_pre_grant_setup,
-       .fail_grant_setup       = NULL,
+
+    /* domain management control hooks */
+    .pre_domain_create      = ste_pre_domain_create,
+    .post_domain_create     = NULL,
+    .fail_domain_create     = NULL,
+    .post_domain_destroy    = ste_post_domain_destroy,
+
+    /* event channel control hooks */
+    .pre_eventchannel_unbound       = ste_pre_eventchannel_unbound,
+    .fail_eventchannel_unbound      = NULL,
+    .pre_eventchannel_interdomain   = ste_pre_eventchannel_interdomain,
+    .fail_eventchannel_interdomain  = NULL,
+
+    /* grant table control hooks */
+    .pre_grant_map_ref      = ste_pre_grant_map_ref,
+    .fail_grant_map_ref     = NULL,
+    .pre_grant_setup        = ste_pre_grant_setup,
+    .fail_grant_setup       = NULL,
+    .sharing                = ste_sharing,
 };
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
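
For orientation: the ssidrefs table managed above is a flattened
[max_ssidrefs][max_types] array of domaintype_t, so testing whether a given
(ssidref, type) pair is set is a single indexed load, exactly as done in
ste_dump_ssid_types(). A minimal sketch of that lookup (the helper name is
illustrative only, not part of this changeset):

    /* illustrative flattened 2-D lookup: row = ssidref, column = type */
    static inline int ste_has_type(ssidref_t ssidref, int type)
    {
        return ste_bin_pol.ssidrefs[ssidref * ste_bin_pol.max_types + type] != 0;
    }
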
diff -r 19af31a59537 -r f31494465fb0 xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c     Fri Oct 21 11:06:17 2005
+++ b/xen/arch/x86/domain.c     Fri Oct 21 11:07:14 2005
@@ -254,6 +254,7 @@
 void arch_do_createdomain(struct vcpu *v)
 {
     struct domain *d = v->domain;
+    l1_pgentry_t gdt_l1e;
     int vcpuid;
 
     if ( is_idle_task(d) )
@@ -282,12 +283,10 @@
      * GDT, and the old VCPU# is invalid in the new domain, we would otherwise
      * try to load CS from an invalid table.
      */
+    gdt_l1e = l1e_from_page(virt_to_page(gdt_table), PAGE_HYPERVISOR);
     for ( vcpuid = 0; vcpuid < MAX_VIRT_CPUS; vcpuid++ )
-    {
         d->arch.mm_perdomain_pt[
-            (vcpuid << PDPT_VCPU_SHIFT) + FIRST_RESERVED_GDT_PAGE] =
-            l1e_from_page(virt_to_page(gdt_table), PAGE_HYPERVISOR);
-    }
+            (vcpuid << PDPT_VCPU_SHIFT) + FIRST_RESERVED_GDT_PAGE] = gdt_l1e;
 
     v->arch.guest_vtable  = __linear_l2_table;
     v->arch.shadow_vtable = __shadow_linear_l2_table;
diff -r 19af31a59537 -r f31494465fb0 xen/arch/x86/setup.c
--- a/xen/arch/x86/setup.c      Fri Oct 21 11:06:17 2005
+++ b/xen/arch/x86/setup.c      Fri Oct 21 11:07:14 2005
@@ -141,7 +141,7 @@
 static void __init start_of_day(void)
 {
     int i;
-    unsigned long vgdt;
+    unsigned long vgdt, gdt_pfn;
 
     early_cpu_init();
 
@@ -164,10 +164,10 @@
      * noted in arch_do_createdomain(), we must map for every possible VCPU#.
      */
     vgdt = GDT_VIRT_START(current) + FIRST_RESERVED_GDT_BYTE;
+    gdt_pfn = virt_to_phys(gdt_table) >> PAGE_SHIFT;
     for ( i = 0; i < MAX_VIRT_CPUS; i++ )
     {
-        map_pages_to_xen(
-            vgdt, virt_to_phys(gdt_table) >> PAGE_SHIFT, 1, PAGE_HYPERVISOR);
+        map_pages_to_xen(vgdt, gdt_pfn, 1, PAGE_HYPERVISOR);
         vgdt += 1 << PDPT_VCPU_VA_SHIFT;
     }
 
diff -r 19af31a59537 -r f31494465fb0 xen/common/acm_ops.c
--- a/xen/common/acm_ops.c      Fri Oct 21 11:06:17 2005
+++ b/xen/common/acm_ops.c      Fri Oct 21 11:07:14 2005
@@ -31,22 +31,23 @@
 
 #if (ACM_USE_SECURITY_POLICY == ACM_NULL_POLICY)
 
-long do_acm_op(acm_op_t * u_acm_op)
+long do_acm_op(struct acm_op * u_acm_op)
 {
     return -ENOSYS;
 }
 
 #else
 
-typedef enum acm_operation {
+enum acm_operation {
     POLICY,                     /* access to policy interface (early drop) */
     GETPOLICY,                  /* dump policy cache */
     SETPOLICY,                  /* set policy cache (controls security) */
     DUMPSTATS,                  /* dump policy statistics */
-    GETSSID                     /* retrieve ssidref for domain id */
-} acm_operation_t;
-
-int acm_authorize_acm_ops(struct domain *d, acm_operation_t pops)
+    GETSSID,                    /* retrieve ssidref for domain id (decide inside authorized domains) */
+    GETDECISION                 /* retrieve ACM decision from authorized domains */
+};
+
+int acm_authorize_acm_ops(struct domain *d, enum acm_operation pops)
 {
     /* all policy management functions are restricted to privileged domains,
      * soon we will introduce finer-grained privileges for policy operations
@@ -59,10 +60,10 @@
     return ACM_ACCESS_PERMITTED;
 }
 
-long do_acm_op(acm_op_t * u_acm_op)
+long do_acm_op(struct acm_op * u_acm_op)
 {
     long ret = 0;
-    acm_op_t curop, *op = &curop;
+    struct acm_op curop, *op = &curop;
 
     /* check here policy decision for policy commands */
     /* for now allow DOM0 only, later indepedently    */
@@ -78,81 +79,148 @@
     switch (op->cmd)
     {
     case ACM_SETPOLICY:
-        {
-            if (acm_authorize_acm_ops(current->domain, SETPOLICY))
-                return -EACCES;
-            printkd("%s: setting policy.\n", __func__);
-            ret = acm_set_policy(op->u.setpolicy.pushcache,
-                                 op->u.setpolicy.pushcache_size, 1);
-            if (ret == ACM_OK)
-                ret = 0;
-            else
-                ret = -ESRCH;
-        }
-        break;
+    {
+        if (acm_authorize_acm_ops(current->domain, SETPOLICY))
+            return -EACCES;
+        printkd("%s: setting policy.\n", __func__);
+        ret = acm_set_policy(op->u.setpolicy.pushcache,
+                             op->u.setpolicy.pushcache_size, 1);
+        if (ret == ACM_OK)
+            ret = 0;
+        else
+            ret = -ESRCH;
+    }
+    break;
 
     case ACM_GETPOLICY:
-        {
-            if (acm_authorize_acm_ops(current->domain, GETPOLICY))
-                return -EACCES;
-            printkd("%s: getting policy.\n", __func__);
-            ret = acm_get_policy(op->u.getpolicy.pullcache,
-                                 op->u.getpolicy.pullcache_size);
-            if (ret == ACM_OK)
-                ret = 0;
-            else
-                ret = -ESRCH;
-        }
-        break;
+    {
+        if (acm_authorize_acm_ops(current->domain, GETPOLICY))
+            return -EACCES;
+        printkd("%s: getting policy.\n", __func__);
+        ret = acm_get_policy(op->u.getpolicy.pullcache,
+                             op->u.getpolicy.pullcache_size);
+        if (ret == ACM_OK)
+            ret = 0;
+        else
+            ret = -ESRCH;
+    }
+    break;
 
     case ACM_DUMPSTATS:
-        {
-            if (acm_authorize_acm_ops(current->domain, DUMPSTATS))
-                return -EACCES;
-            printkd("%s: dumping statistics.\n", __func__);
-            ret = acm_dump_statistics(op->u.dumpstats.pullcache,
-                                      op->u.dumpstats.pullcache_size);
-            if (ret == ACM_OK)
-                ret = 0;
-            else
-                ret = -ESRCH;
-        }
-        break;
+    {
+        if (acm_authorize_acm_ops(current->domain, DUMPSTATS))
+            return -EACCES;
+        printkd("%s: dumping statistics.\n", __func__);
+        ret = acm_dump_statistics(op->u.dumpstats.pullcache,
+                                  op->u.dumpstats.pullcache_size);
+        if (ret == ACM_OK)
+            ret = 0;
+        else
+            ret = -ESRCH;
+    }
+    break;
 
     case ACM_GETSSID:
-        {
-                       ssidref_t ssidref;
-
-            if (acm_authorize_acm_ops(current->domain, GETSSID))
-                return -EACCES;
-
-                       if (op->u.getssid.get_ssid_by == SSIDREF)
-                               ssidref = op->u.getssid.id.ssidref;
-                       else if (op->u.getssid.get_ssid_by == DOMAINID) {
-                               struct domain *subj = find_domain_by_id(op->u.getssid.id.domainid);
-                               if (!subj)
-                                       return -ESRCH; /* domain not found */
-
-                               ssidref = ((struct acm_ssid_domain *)(subj->ssid))->ssidref;
-                               put_domain(subj);
-                       } else
-                               return -ESRCH;
-
-            ret = acm_get_ssid(ssidref,
-                               op->u.getssid.ssidbuf,
-                               op->u.getssid.ssidbuf_size);
-            if (ret == ACM_OK)
-                ret = 0;
-            else
-                ret = -ESRCH;
-        }
-        break;
+    {
+        ssidref_t ssidref;
+
+        if (acm_authorize_acm_ops(current->domain, GETSSID))
+            return -EACCES;
+        printkd("%s: getting SSID.\n", __func__);
+        if (op->u.getssid.get_ssid_by == SSIDREF)
+            ssidref = op->u.getssid.id.ssidref;
+        else if (op->u.getssid.get_ssid_by == DOMAINID) {
+            struct domain *subj = find_domain_by_id(op->u.getssid.id.domainid);
+            if (!subj)
+                return -ESRCH; /* domain not found */
+
+            ssidref = ((struct acm_ssid_domain *)(subj->ssid))->ssidref;
+            put_domain(subj);
+        } else
+            return -ESRCH;
+
+        ret = acm_get_ssid(ssidref,
+                           op->u.getssid.ssidbuf,
+                           op->u.getssid.ssidbuf_size);
+        if (ret == ACM_OK)
+            ret = 0;
+        else
+            ret = -ESRCH;
+    }
+    break;
+
+    case ACM_GETDECISION:
+    {
+        ssidref_t ssidref1, ssidref2;
+
+        if (acm_authorize_acm_ops(current->domain, GETDECISION)) {
+            ret = -EACCES;
+            goto out;
+        }
+        printkd("%s: getting access control decision.\n", __func__);
+        if (op->u.getdecision.get_decision_by1 == SSIDREF) {
+            ssidref1 = op->u.getdecision.id1.ssidref;
+        }
+        else if (op->u.getdecision.get_decision_by1 == DOMAINID) {
+            struct domain *subj = find_domain_by_id(op->u.getdecision.id1.domainid);
+            if (!subj) {
+                ret = -ESRCH; /* domain not found */
+                goto out;
+            }
+            ssidref1 = ((struct acm_ssid_domain *)(subj->ssid))->ssidref;
+            put_domain(subj);
+        } else {
+            ret = -ESRCH;
+            goto out;
+        }
+        if (op->u.getdecision.get_decision_by2 == SSIDREF) {
+            ssidref2 = op->u.getdecision.id2.ssidref;
+        }
+        else if (op->u.getdecision.get_decision_by2 == DOMAINID) {
+            struct domain *subj = find_domain_by_id(op->u.getdecision.id2.domainid);
+            if (!subj) {
+                ret = -ESRCH; /* domain not found */
+                goto out;
+            }
+            ssidref2 = ((struct acm_ssid_domain *)(subj->ssid))->ssidref;
+            put_domain(subj);
+        } else {
+            ret = -ESRCH;
+            goto out;
+        }
+        ret = acm_get_decision(ssidref1, ssidref2, op->u.getdecision.hook);
+    }
+    break;
 
     default:
         ret = -ESRCH;
-
-    }
+    }
+
+ out:
+    if (ret == ACM_ACCESS_PERMITTED) {
+        op->u.getdecision.acm_decision = ACM_ACCESS_PERMITTED;
+        ret = 0;
+    } else if (ret == ACM_ACCESS_DENIED) {
+        op->u.getdecision.acm_decision = ACM_ACCESS_DENIED;
+        ret = 0;
+    } else {
+        op->u.getdecision.acm_decision = ACM_ACCESS_DENIED;
+        if (ret > 0)
+            ret = -ret;
+    }
+    /* copy decision back to user space */
+    copy_to_user(u_acm_op, op, sizeof(*op));
     return ret;
 }
 
 #endif
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
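
From an authorized domain, the new ACM_GETDECISION command is driven roughly
as sketched below. The field names match the public interface later in this
changeset; do_acm_op_call() merely stands in for whatever hypercall wrapper
the tools layer provides (assumed here, not shown in this patch):

    struct acm_op op;

    memset(&op, 0, sizeof(op));
    op.cmd = ACM_GETDECISION;
    op.interface_version = ACM_INTERFACE_VERSION;
    op.u.getdecision.get_decision_by1 = DOMAINID;
    op.u.getdecision.id1.domainid     = 1;
    op.u.getdecision.get_decision_by2 = DOMAINID;
    op.u.getdecision.id2.domainid     = 2;
    op.u.getdecision.hook             = SHARING;

    if (do_acm_op_call(&op) == 0 &&
        op.u.getdecision.acm_decision == ACM_ACCESS_PERMITTED)
        ; /* the two domains are permitted to share */
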
diff -r 19af31a59537 -r f31494465fb0 xen/common/dom0_ops.c
--- a/xen/common/dom0_ops.c     Fri Oct 21 11:06:17 2005
+++ b/xen/common/dom0_ops.c     Fri Oct 21 11:07:14 2005
@@ -199,7 +199,7 @@
         /*
          * If we're on a HT system, we only use the first HT for dom0, other 
          * domains will all share the second HT of each CPU. Since dom0 is on 
-            * CPU 0, we favour high numbered CPUs in the event of a tie.
+         * CPU 0, we favour high numbered CPUs in the event of a tie.
          */
         pro = smp_num_siblings - 1;
         for ( i = pro; i < num_online_cpus(); i += smp_num_siblings )
diff -r 19af31a59537 -r f31494465fb0 xen/common/sched_sedf.c
--- a/xen/common/sched_sedf.c   Fri Oct 21 11:06:17 2005
+++ b/xen/common/sched_sedf.c   Fri Oct 21 11:07:14 2005
@@ -1150,7 +1150,7 @@
     inf->block_tot++;
 #endif
     if (unlikely(now < PERIOD_BEGIN(inf))) {
-       PRINT(4,"extratime unblock\n");
+        PRINT(4,"extratime unblock\n");
         /* unblocking in extra-time! */
 #if (EXTRA == EXTRA_BLOCK_WEIGHT)
         if (inf->status & EXTRA_WANT_PEN_Q) {
@@ -1459,3 +1459,13 @@
     .wake           = sedf_wake,
     .adjdom         = sedf_adjdom,
 };
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff -r 19af31a59537 -r f31494465fb0 xen/include/acm/acm_core.h
--- a/xen/include/acm/acm_core.h        Fri Oct 21 11:06:17 2005
+++ b/xen/include/acm/acm_core.h        Fri Oct 21 11:07:14 2005
@@ -15,6 +15,7 @@
  *    for the access control module and relevant policies
  *
  */
+
 #ifndef _ACM_CORE_H
 #define _ACM_CORE_H
 
@@ -25,30 +26,30 @@
 
 /* Xen-internal representation of the binary policy */
 struct acm_binary_policy {
-       u16 primary_policy_code;
-       u16 secondary_policy_code;
-       void *primary_binary_policy;                                 
-       void *secondary_binary_policy;
-       
+    u16 primary_policy_code;
+    u16 secondary_policy_code;
+    void *primary_binary_policy;                                 
+    void *secondary_binary_policy;
+ 
 };
 
 struct chwall_binary_policy {
-       u16 max_types;
-       u16 max_ssidrefs;
-       u16 max_conflictsets;
-       domaintype_t *ssidrefs;                 /* [max_ssidrefs][max_types]    */
-       domaintype_t *conflict_aggregate_set;   /* [max_types]                  */
-       domaintype_t *running_types;            /* [max_types]                  */
-       domaintype_t *conflict_sets;            /* [max_conflictsets][max_types]*/
+    u32 max_types;
+    u32 max_ssidrefs;
+    u32 max_conflictsets;
+    domaintype_t *ssidrefs;     /* [max_ssidrefs][max_types]  */
+    domaintype_t *conflict_aggregate_set;  /* [max_types]      */
+    domaintype_t *running_types;    /* [max_types]      */
+    domaintype_t *conflict_sets;   /* [max_conflictsets][max_types]*/
 };
 
 struct ste_binary_policy {
-       u16 max_types;
-       u16 max_ssidrefs;
-       domaintype_t *ssidrefs;                 /* [max_ssidrefs][max_types]    */
-       atomic_t ec_eval_count, gt_eval_count;
-       atomic_t ec_denied_count, gt_denied_count; 
-       atomic_t ec_cachehit_count, gt_cachehit_count;
+    u32 max_types;
+    u32 max_ssidrefs;
+    domaintype_t *ssidrefs;     /* [max_ssidrefs][max_types]  */
+    atomic_t ec_eval_count, gt_eval_count;
+    atomic_t ec_denied_count, gt_denied_count; 
+    atomic_t ec_cachehit_count, gt_cachehit_count;
 };
 
 /* global acm policy */
@@ -63,7 +64,7 @@
 
 /* defines number of access decisions to other domains can be cached
  * one entry per domain, TE does not distinguish evtchn or grant_table */
-#define ACM_TE_CACHE_SIZE      8
+#define ACM_TE_CACHE_SIZE 8
 enum acm_ste_flag { VALID, FREE };
 
 /* cache line:
@@ -72,57 +73,67 @@
  *                 on domain cache_line.id
  */
 struct acm_ste_cache_line {
-       enum acm_ste_flag valid;
-       domid_t id;
+    enum acm_ste_flag valid;
+    domid_t id;
 };
 
 /* general definition of a subject security id */
 struct acm_ssid_domain {
-       enum acm_datatype datatype;             /* type of subject (e.g., partition) */
-       ssidref_t         ssidref;              /* combined security reference */
-       void              *primary_ssid;        /* primary policy ssid part (e.g. chinese wall) */
-       void              *secondary_ssid;      /* secondary policy ssid part (e.g. type enforcement) */
-       struct domain     *subject;             /* backpointer to subject structure */
-       domid_t           domainid;             /* replicate id */
+    enum acm_datatype datatype; /* type of subject (e.g., partition) */
+    ssidref_t ssidref;   /* combined security reference */
+    void *primary_ssid;   /* primary policy ssid part (e.g. chinese wall) */
+    void *secondary_ssid;    /* secondary policy ssid part (e.g. type enforcement) */
+    struct domain *subject;     /* backpointer to subject structure */
+    domid_t domainid;   /* replicate id */
 };
 
 /* chinese wall ssid type */
 struct chwall_ssid {
-       ssidref_t chwall_ssidref;
+    ssidref_t chwall_ssidref;
 };
 
 /* simple type enforcement ssid type */
 struct ste_ssid {
-       ssidref_t ste_ssidref;
-       struct acm_ste_cache_line ste_cache[ACM_TE_CACHE_SIZE]; /* decision cache */
+    ssidref_t ste_ssidref;
+    struct acm_ste_cache_line ste_cache[ACM_TE_CACHE_SIZE]; /* decision cache */
 };
 
 /* macros to access ssidref for primary / secondary policy 
- *     primary ssidref   = lower 16 bit
- *      secondary ssidref = higher 16 bit
+ * primary ssidref   = lower 16 bit
+ *  secondary ssidref = higher 16 bit
  */
 #define ACM_PRIMARY(ssidref) \
-       ((ssidref) & 0xffff)
+ ((ssidref) & 0xffff)
 
 #define ACM_SECONDARY(ssidref) \
-       ((ssidref) >> 16)
+ ((ssidref) >> 16)
 
 #define GET_SSIDREF(POLICY, ssidref) \
-       ((POLICY) == acm_bin_pol.primary_policy_code) ? \
-       ACM_PRIMARY(ssidref) : ACM_SECONDARY(ssidref)
+ ((POLICY) == acm_bin_pol.primary_policy_code) ? \
+ ACM_PRIMARY(ssidref) : ACM_SECONDARY(ssidref)
 
 /* macros to access ssid pointer for primary / secondary policy */
 #define GET_SSIDP(POLICY, ssid) \
-       ((POLICY) == acm_bin_pol.primary_policy_code) ? \
-       ((ssid)->primary_ssid) : ((ssid)->secondary_ssid)
+ ((POLICY) == acm_bin_pol.primary_policy_code) ? \
+ ((ssid)->primary_ssid) : ((ssid)->secondary_ssid)
 
 /* protos */
 int acm_init_domain_ssid(domid_t id, ssidref_t ssidref);
-int acm_free_domain_ssid(struct acm_ssid_domain *ssid);
-int acm_set_policy(void *buf, u16 buf_size, int isuserbuffer);
-int acm_get_policy(void *buf, u16 buf_size);
+void acm_free_domain_ssid(struct acm_ssid_domain *ssid);
+int acm_set_policy(void *buf, u32 buf_size, int isuserbuffer);
+int acm_get_policy(void *buf, u32 buf_size);
 int acm_dump_statistics(void *buf, u16 buf_size);
 int acm_get_ssid(ssidref_t ssidref, u8 *buf, u16 buf_size);
+int acm_get_decision(ssidref_t ssidref1, ssidref_t ssidref2, enum acm_hook_type hook);
 
 #endif
 
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
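
As the ACM_PRIMARY/ACM_SECONDARY macros above encode, a domain's 32-bit
ssidref packs two 16-bit references: the primary policy's in the low half and
the secondary policy's in the high half. A worked example with arbitrary
values:

    ssidref_t ssidref = (0x0002 << 16) | 0x0005;

    ACM_PRIMARY(ssidref);   /* 0x0005, e.g. the chinese wall ssidref */
    ACM_SECONDARY(ssidref); /* 0x0002, e.g. the type enforcement ssidref */
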
diff -r 19af31a59537 -r f31494465fb0 xen/include/acm/acm_endian.h
--- a/xen/include/acm/acm_endian.h      Fri Oct 21 11:06:17 2005
+++ b/xen/include/acm/acm_endian.h      Fri Oct 21 11:07:14 2005
@@ -18,6 +18,7 @@
  * big-endian policy interface
  *
  */
+
 #ifndef _ACM_ENDIAN_H
 #define _ACM_ENDIAN_H
 
@@ -30,10 +31,10 @@
 {
     if (little_endian)
         return 
-           ( (((x) >> 24) & 0xff      )| 
-             (((x) >>  8) & 0xff00    )| 
-             (((x) <<  8) & 0xff0000  )|
-             (((x) << 24) & 0xff000000) );
+            ( (((x) >> 24) & 0xff      )| 
+              (((x) >>  8) & 0xff00    )| 
+              (((x) <<  8) & 0xff0000  )|
+              (((x) << 24) & 0xff000000) );
     else
         return x;
 }
@@ -42,10 +43,10 @@
 {
     if (little_endian)
         return 
-           ( (((x) >> 8) & 0xff   )|
-             (((x) << 8) & 0xff00 ) );
+            ( (((x) >> 8) & 0xff   )|
+              (((x) << 8) & 0xff00 ) );
     else
-       return x;
+        return x;
 }
 
 #define htonl(x) ntohl(x)
@@ -55,8 +56,8 @@
 {
     unsigned int i = 0;
     while (i < n) {
-               dest[i] = htons(src[i]);
-               i++;
+        dest[i] = htons(src[i]);
+        i++;
     }
 }
 
@@ -64,8 +65,8 @@
 {
     unsigned int i = 0;
     while (i < n) {
-       dest[i] = htonl(src[i]);
-       i++;
+        dest[i] = htonl(src[i]);
+        i++;
     }
 }
 
@@ -86,3 +87,13 @@
 }
 
 #endif
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
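
Because the policy interface is big-endian (see the header comment), these
helpers swap bytes only when the little_endian flag is set and are identity
functions otherwise. A quick worked example, assuming a little-endian host:

    ntohl(0x11223344);  /* yields 0x44332211 */
    ntohs(0x1122);      /* yields 0x2211 */
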
diff -r 19af31a59537 -r f31494465fb0 xen/include/acm/acm_hooks.h
--- a/xen/include/acm/acm_hooks.h       Fri Oct 21 11:06:17 2005
+++ b/xen/include/acm/acm_hooks.h       Fri Oct 21 11:07:14 2005
@@ -15,6 +15,7 @@
  *      sHype hooks that are called throughout Xen.
  * 
  */
+
 #ifndef _ACM_HOOKS_H
 #define _ACM_HOOKS_H
 
@@ -89,8 +90,8 @@
     /* policy management functions (must always be defined!) */
     int  (*init_domain_ssid)           (void **ssid, ssidref_t ssidref);
     void (*free_domain_ssid)           (void *ssid);
-    int  (*dump_binary_policy)         (u8 *buffer, u16 buf_size);
-    int  (*set_binary_policy)          (u8 *buffer, u16 buf_size);
+    int  (*dump_binary_policy)         (u8 *buffer, u32 buf_size);
+    int  (*set_binary_policy)          (u8 *buffer, u32 buf_size);
     int  (*dump_statistics)            (u8 *buffer, u16 buf_size);
     int  (*dump_ssid_types)            (ssidref_t ssidref, u8 *buffer, u16 buf_size);
     /* domain management control hooks (can be NULL) */
@@ -108,6 +109,8 @@
     void (*fail_grant_map_ref)         (domid_t id);
     int  (*pre_grant_setup)            (domid_t id);
     void (*fail_grant_setup)           (domid_t id);
+    /* generic domain-requested decision hooks (can be NULL) */
+    int (*sharing)                     (ssidref_t ssidref1, ssidref_t ssidref2);
 };
 
 /* global variables */
@@ -144,6 +147,8 @@
 { return 0; }
 static inline void acm_post_domain0_create(domid_t domid) 
 { return; }
+static inline int acm_sharing(ssidref_t ssidref1, ssidref_t ssidref2)
+{ return 0; }
 
 #else
 
@@ -281,7 +286,8 @@
         break;
     case EVTCHNOP_bind_interdomain:
         ret = acm_pre_eventchannel_interdomain(
-            op->u.bind_interdomain.dom1, op->u.bind_interdomain.dom2);
+            current->domain->domain_id,
+            op->u.bind_interdomain.remote_dom);
         break;
     default:
         ret = 0; /* ok */
@@ -341,6 +347,18 @@
     acm_post_domain_create(domid, ACM_DOM0_SSIDREF);
 }
 
+static inline int acm_sharing(ssidref_t ssidref1, ssidref_t ssidref2)
+{
+    if ((acm_primary_ops->sharing != NULL) &&
+        acm_primary_ops->sharing(ssidref1, ssidref2))
+        return ACM_ACCESS_DENIED;
+    else if ((acm_secondary_ops->sharing != NULL) &&
+             acm_secondary_ops->sharing(ssidref1, ssidref2)) {
+        return ACM_ACCESS_DENIED;
+    } else
+        return ACM_ACCESS_PERMITTED;
+}
+
 extern int acm_init(unsigned int *initrdidx,
                     const multiboot_info_t *mbi,
                     unsigned long start);
@@ -348,3 +366,13 @@
 #endif
 
 #endif
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
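
One subtlety in the acm_sharing() wrapper above: any nonzero return from a
policy's sharing hook counts as a veto, which is sound because
ACM_ACCESS_PERMITTED is 0 and ACM_ACCESS_DENIED is -111 in the public
interface. The combined decision is therefore the conjunction sketched here
(same semantics, condensed purely for illustration):

    int denied =
        (acm_primary_ops->sharing   && acm_primary_ops->sharing(ssidref1, ssidref2)) ||
        (acm_secondary_ops->sharing && acm_secondary_ops->sharing(ssidref1, ssidref2));

    return denied ? ACM_ACCESS_DENIED : ACM_ACCESS_PERMITTED;
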
diff -r 19af31a59537 -r f31494465fb0 xen/include/public/acm.h
--- a/xen/include/public/acm.h  Fri Oct 21 11:06:17 2005
+++ b/xen/include/public/acm.h  Fri Oct 21 11:07:14 2005
@@ -52,9 +52,9 @@
 #define ACM_ERROR          -4
 
 /* External ACCESS DECISIONS */
-#define ACM_ACCESS_PERMITTED  0
-#define ACM_ACCESS_DENIED  -111
-#define ACM_NULL_POINTER_ERROR  -200
+#define ACM_ACCESS_PERMITTED        0
+#define ACM_ACCESS_DENIED           -111
+#define ACM_NULL_POINTER_ERROR      -200
 
 /* primary policy in lower 4 bits */
 #define ACM_NULL_POLICY 0
@@ -83,6 +83,9 @@
 
 /* defines a ssid reference used by xen */
 typedef uint32_t ssidref_t;
+
+/* hooks that are known to domains */
+enum acm_hook_type {NONE=0, SHARING};
 
 /* -------security policy relevant type definitions-------- */
 
diff -r 19af31a59537 -r f31494465fb0 xen/include/public/acm_ops.h
--- a/xen/include/public/acm_ops.h      Fri Oct 21 11:06:17 2005
+++ b/xen/include/public/acm_ops.h      Fri Oct 21 11:07:14 2005
@@ -27,36 +27,36 @@
  * This makes sure that old versions of acm tools will stop working in a
  * well-defined way (rather than crashing the machine, for instance).
  */
-#define ACM_INTERFACE_VERSION   0xAAAA0004
+#define ACM_INTERFACE_VERSION   0xAAAA0005
 
 /************************************************************************/
 
 #define ACM_SETPOLICY         4
-typedef struct acm_setpolicy {
+struct acm_setpolicy {
     /* OUT variables */
     void *pushcache;
-    uint16_t pushcache_size;
-} acm_setpolicy_t;
+    uint32_t pushcache_size;
+};
 
 
 #define ACM_GETPOLICY         5
-typedef struct acm_getpolicy {
+struct acm_getpolicy {
     /* OUT variables */
     void *pullcache;
-    uint16_t pullcache_size;
-} acm_getpolicy_t;
+    uint32_t pullcache_size;
+};
 
 
 #define ACM_DUMPSTATS         6
-typedef struct acm_dumpstats {
+struct acm_dumpstats {
     void *pullcache;
-    uint16_t pullcache_size;
-} acm_dumpstats_t;
+    uint32_t pullcache_size;
+};
 
 
 #define ACM_GETSSID           7
-enum get_type {UNSET, SSIDREF, DOMAINID};
-typedef struct acm_getssid {
+enum get_type {UNSET=0, SSIDREF, DOMAINID};
+struct acm_getssid {
     enum get_type get_ssid_by;
     union {
         domaintype_t domainid;
@@ -64,18 +64,35 @@
     } id;
     void *ssidbuf;
     uint16_t ssidbuf_size;
-} acm_getssid_t;
+};
 
-typedef struct acm_op {
+#define ACM_GETDECISION        8
+struct acm_getdecision {
+    enum get_type get_decision_by1; /* in */
+    enum get_type get_decision_by2;
+    union {
+        domaintype_t domainid;
+        ssidref_t    ssidref;
+    } id1;
+    union {
+        domaintype_t domainid;
+        ssidref_t    ssidref;
+    } id2;
+    enum acm_hook_type hook;
+    int acm_decision;           /* out */
+};
+
+struct acm_op {
     uint32_t cmd;
     uint32_t interface_version;      /* ACM_INTERFACE_VERSION */
     union {
-        acm_setpolicy_t setpolicy;
-        acm_getpolicy_t getpolicy;
-        acm_dumpstats_t dumpstats;
-        acm_getssid_t getssid;
+        struct acm_setpolicy setpolicy;
+        struct acm_getpolicy getpolicy;
+        struct acm_dumpstats dumpstats;
+        struct acm_getssid getssid;
+        struct acm_getdecision getdecision;
     } u;
-} acm_op_t;
+};
 
 #endif                          /* __XEN_PUBLIC_ACM_OPS_H__ */
 
diff -r 19af31a59537 -r f31494465fb0 patches/linux-2.6.12/2.6.12.6.patch
--- /dev/null   Fri Oct 21 11:06:17 2005
+++ b/patches/linux-2.6.12/2.6.12.6.patch       Fri Oct 21 11:07:14 2005
@@ -0,0 +1,1738 @@
+diff --git a/Makefile b/Makefile
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ VERSION = 2
+ PATCHLEVEL = 6
+ SUBLEVEL = 12
+-EXTRAVERSION =
++EXTRAVERSION = .6
+ NAME=Woozy Numbat
+ 
+ # *DOCUMENTATION*
+@@ -1149,7 +1149,7 @@ endif # KBUILD_EXTMOD
+ #(which is the most common case IMHO) to avoid unneeded clutter in the big tags file.
+ #Adding $(srctree) adds about 20M on i386 to the size of the output file!
+ 
+-ifeq ($(KBUILD_OUTPUT),)
++ifeq ($(src),$(obj))
+ __srctree =
+ else
+ __srctree = $(srctree)/
+diff --git a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
+--- a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
++++ b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
+@@ -44,7 +44,7 @@
+ 
+ #define PFX "powernow-k8: "
+ #define BFX PFX "BIOS error: "
+-#define VERSION "version 1.40.2"
++#define VERSION "version 1.40.4"
+ #include "powernow-k8.h"
+ 
+ /* serialize freq changes  */
+@@ -978,7 +978,7 @@ static int __init powernowk8_cpu_init(st
+ {
+       struct powernow_k8_data *data;
+       cpumask_t oldmask = CPU_MASK_ALL;
+-      int rc;
++      int rc, i;
+ 
+       if (!check_supported_cpu(pol->cpu))
+               return -ENODEV;
+@@ -1064,7 +1064,9 @@ static int __init powernowk8_cpu_init(st
+       printk("cpu_init done, current fid 0x%x, vid 0x%x\n",
+              data->currfid, data->currvid);
+ 
+-      powernow_data[pol->cpu] = data;
++      for_each_cpu_mask(i, cpu_core_map[pol->cpu]) {
++              powernow_data[i] = data;
++      }
+ 
+       return 0;
+ 
+diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
+--- a/arch/i386/kernel/process.c
++++ b/arch/i386/kernel/process.c
+@@ -827,6 +827,8 @@ asmlinkage int sys_get_thread_area(struc
+       if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
+               return -EINVAL;
+ 
++      memset(&info, 0, sizeof(info));
++
+       desc = current->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
+ 
+       info.entry_number = idx;
+diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c
+--- a/arch/ia64/kernel/ptrace.c
++++ b/arch/ia64/kernel/ptrace.c
+@@ -945,6 +945,13 @@ access_uarea (struct task_struct *child,
+                               *data = (pt->cr_ipsr & IPSR_MASK);
+                       return 0;
+ 
++                    case PT_AR_RSC:
++                      if (write_access)
++                              pt->ar_rsc = *data | (3 << 2); /* force PL3 */
++                      else
++                              *data = pt->ar_rsc;
++                      return 0;
++
+                     case PT_AR_RNAT:
+                       urbs_end = ia64_get_user_rbs_end(child, pt, NULL);
+                       rnat_addr = (long) ia64_rse_rnat_addr((long *)
+@@ -996,9 +1003,6 @@ access_uarea (struct task_struct *child,
+                     case PT_AR_BSPSTORE:
+                       ptr = pt_reg_addr(pt, ar_bspstore);
+                       break;
+-                    case PT_AR_RSC:
+-                      ptr = pt_reg_addr(pt, ar_rsc);
+-                      break;
+                     case PT_AR_UNAT:
+                       ptr = pt_reg_addr(pt, ar_unat);
+                       break;
+@@ -1234,7 +1238,7 @@ ptrace_getregs (struct task_struct *chil
+ static long
+ ptrace_setregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
+ {
+-      unsigned long psr, ec, lc, rnat, bsp, cfm, nat_bits, val = 0;
++      unsigned long psr, rsc, ec, lc, rnat, bsp, cfm, nat_bits, val = 0;
+       struct unw_frame_info info;
+       struct switch_stack *sw;
+       struct ia64_fpreg fpval;
+@@ -1267,7 +1271,7 @@ ptrace_setregs (struct task_struct *chil
+       /* app regs */
+ 
+       retval |= __get_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]);
+-      retval |= __get_user(pt->ar_rsc, &ppr->ar[PT_AUR_RSC]);
++      retval |= __get_user(rsc, &ppr->ar[PT_AUR_RSC]);
+       retval |= __get_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]);
+       retval |= __get_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]);
+       retval |= __get_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]);
+@@ -1365,6 +1369,7 @@ ptrace_setregs (struct task_struct *chil
+       retval |= __get_user(nat_bits, &ppr->nat);
+ 
+       retval |= access_uarea(child, PT_CR_IPSR, &psr, 1);
++      retval |= access_uarea(child, PT_AR_RSC, &rsc, 1);
+       retval |= access_uarea(child, PT_AR_EC, &ec, 1);
+       retval |= access_uarea(child, PT_AR_LC, &lc, 1);
+       retval |= access_uarea(child, PT_AR_RNAT, &rnat, 1);
+diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c
+--- a/arch/ia64/kernel/signal.c
++++ b/arch/ia64/kernel/signal.c
+@@ -94,7 +94,7 @@ sys_sigaltstack (const stack_t __user *u
+ static long
+ restore_sigcontext (struct sigcontext __user *sc, struct sigscratch *scr)
+ {
+-      unsigned long ip, flags, nat, um, cfm;
++      unsigned long ip, flags, nat, um, cfm, rsc;
+       long err;
+ 
+       /* Always make any pending restarted system calls return -EINTR */
+@@ -106,7 +106,7 @@ restore_sigcontext (struct sigcontext __
+       err |= __get_user(ip, &sc->sc_ip);                      /* instruction pointer */
+       err |= __get_user(cfm, &sc->sc_cfm);
+       err |= __get_user(um, &sc->sc_um);                      /* user mask */
+-      err |= __get_user(scr->pt.ar_rsc, &sc->sc_ar_rsc);
++      err |= __get_user(rsc, &sc->sc_ar_rsc);
+       err |= __get_user(scr->pt.ar_unat, &sc->sc_ar_unat);
+       err |= __get_user(scr->pt.ar_fpsr, &sc->sc_ar_fpsr);
+       err |= __get_user(scr->pt.ar_pfs, &sc->sc_ar_pfs);
+@@ -119,6 +119,7 @@ restore_sigcontext (struct sigcontext __
+       err |= __copy_from_user(&scr->pt.r15, &sc->sc_gr[15], 8);       /* r15 */
+ 
+       scr->pt.cr_ifs = cfm | (1UL << 63);
++      scr->pt.ar_rsc = rsc | (3 << 2); /* force PL3 */
+ 
+       /* establish new instruction pointer: */
+       scr->pt.cr_iip = ip & ~0x3UL;
+diff --git a/arch/ppc/kernel/time.c b/arch/ppc/kernel/time.c
+--- a/arch/ppc/kernel/time.c
++++ b/arch/ppc/kernel/time.c
+@@ -89,6 +89,9 @@ unsigned long tb_to_ns_scale;
+ 
+ extern unsigned long wall_jiffies;
+ 
++/* used for timezone offset */
++static long timezone_offset;
++
+ DEFINE_SPINLOCK(rtc_lock);
+ 
+ EXPORT_SYMBOL(rtc_lock);
+@@ -170,7 +173,7 @@ void timer_interrupt(struct pt_regs * re
+                    xtime.tv_sec - last_rtc_update >= 659 &&
+                    abs((xtime.tv_nsec / 1000) - (1000000-1000000/HZ)) < 500000/HZ &&
+                    jiffies - wall_jiffies == 1) {
+-                      if (ppc_md.set_rtc_time(xtime.tv_sec+1 + time_offset) == 0)
++                      if (ppc_md.set_rtc_time(xtime.tv_sec+1 + timezone_offset) == 0)
+                               last_rtc_update = xtime.tv_sec+1;
+                       else
+                               /* Try again one minute later */
+@@ -286,7 +289,7 @@ void __init time_init(void)
+       unsigned old_stamp, stamp, elapsed;
+ 
+         if (ppc_md.time_init != NULL)
+-                time_offset = ppc_md.time_init();
++                timezone_offset = ppc_md.time_init();
+ 
+       if (__USE_RTC()) {
+               /* 601 processor: dec counts down by 128 every 128ns */
+@@ -331,10 +334,10 @@ void __init time_init(void)
+       set_dec(tb_ticks_per_jiffy);
+ 
+       /* If platform provided a timezone (pmac), we correct the time */
+-        if (time_offset) {
+-              sys_tz.tz_minuteswest = -time_offset / 60;
++        if (timezone_offset) {
++              sys_tz.tz_minuteswest = -timezone_offset / 60;
+               sys_tz.tz_dsttime = 0;
+-              xtime.tv_sec -= time_offset;
++              xtime.tv_sec -= timezone_offset;
+         }
+         set_normalized_timespec(&wall_to_monotonic,
+                                 -xtime.tv_sec, -xtime.tv_nsec);
+diff --git a/arch/ppc64/boot/zlib.c b/arch/ppc64/boot/zlib.c
+--- a/arch/ppc64/boot/zlib.c
++++ b/arch/ppc64/boot/zlib.c
+@@ -1307,7 +1307,7 @@ local int huft_build(
+   {
+     *t = (inflate_huft *)Z_NULL;
+     *m = 0;
+-    return Z_OK;
++    return Z_DATA_ERROR;
+   }
+ 
+ 
+@@ -1351,6 +1351,7 @@ local int huft_build(
+     if ((j = *p++) != 0)
+       v[x[j]++] = i;
+   } while (++i < n);
++  n = x[g];                   /* set n to length of v */
+ 
+ 
+   /* Generate the Huffman codes and for each, make the table entries */
+diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
+--- a/arch/um/kernel/process.c
++++ b/arch/um/kernel/process.c
+@@ -130,7 +130,7 @@ int start_fork_tramp(void *thread_arg, u
+       return(arg.pid);
+ }
+ 
+-static int ptrace_child(void)
++static int ptrace_child(void *arg)
+ {
+       int ret;
+       int pid = os_getpid(), ppid = getppid();
+@@ -159,16 +159,20 @@ static int ptrace_child(void)
+       _exit(ret);
+ }
+ 
+-static int start_ptraced_child(void)
++static int start_ptraced_child(void **stack_out)
+ {
++      void *stack;
++      unsigned long sp;
+       int pid, n, status;
+       
+-      pid = fork();
+-      if(pid == 0)
+-              ptrace_child();
+-
++      stack = mmap(NULL, PAGE_SIZE, PROT_READ | PROT_WRITE | PROT_EXEC,
++                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
++      if(stack == MAP_FAILED)
++              panic("check_ptrace : mmap failed, errno = %d", errno);
++      sp = (unsigned long) stack + PAGE_SIZE - sizeof(void *);
++      pid = clone(ptrace_child, (void *) sp, SIGCHLD, NULL);
+       if(pid < 0)
+-              panic("check_ptrace : fork failed, errno = %d", errno);
++              panic("check_ptrace : clone failed, errno = %d", errno);
+       CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED));
+       if(n < 0)
+               panic("check_ptrace : wait failed, errno = %d", errno);
+@@ -176,6 +180,7 @@ static int start_ptraced_child(void)
+               panic("check_ptrace : expected SIGSTOP, got status = %d",
+                     status);
+ 
++      *stack_out = stack;
+       return(pid);
+ }
+ 
+@@ -183,12 +188,12 @@ static int start_ptraced_child(void)
+  * just avoid using sysemu, not panic, but only if SYSEMU features are broken.
+  * So only for SYSEMU features we test mustpanic, while normal host features
+  * must work anyway!*/
+-static int stop_ptraced_child(int pid, int exitcode, int mustexit)
++static int stop_ptraced_child(int pid, void *stack, int exitcode, int 
mustpanic)
+ {
+       int status, n, ret = 0;
+ 
+       if(ptrace(PTRACE_CONT, pid, 0, 0) < 0)
+-              panic("stop_ptraced_child : ptrace failed, errno = %d", errno);
++              panic("check_ptrace : ptrace failed, errno = %d", errno);
+       CATCH_EINTR(n = waitpid(pid, &status, 0));
+       if(!WIFEXITED(status) || (WEXITSTATUS(status) != exitcode)) {
+               int exit_with = WEXITSTATUS(status);
+@@ -199,13 +204,15 @@ static int stop_ptraced_child(int pid, i
+               printk("check_ptrace : child exited with exitcode %d, while "
+                     "expecting %d; status 0x%x", exit_with,
+                     exitcode, status);
+-              if (mustexit)
++              if (mustpanic)
+                       panic("\n");
+               else
+                       printk("\n");
+               ret = -1;
+       }
+ 
++      if(munmap(stack, PAGE_SIZE) < 0)
++              panic("check_ptrace : munmap failed, errno = %d", errno);
+       return ret;
+ }
+ 
+@@ -227,11 +234,12 @@ __uml_setup("nosysemu", nosysemu_cmd_par
+ 
+ static void __init check_sysemu(void)
+ {
++      void *stack;
+       int pid, syscall, n, status, count=0;
+ 
+       printk("Checking syscall emulation patch for ptrace...");
+       sysemu_supported = 0;
+-      pid = start_ptraced_child();
++      pid = start_ptraced_child(&stack);
+ 
+       if(ptrace(PTRACE_SYSEMU, pid, 0, 0) < 0)
+               goto fail;
+@@ -249,7 +257,7 @@ static void __init check_sysemu(void)
+               panic("check_sysemu : failed to modify system "
+                     "call return, errno = %d", errno);
+ 
+-      if (stop_ptraced_child(pid, 0, 0) < 0)
++      if (stop_ptraced_child(pid, stack, 0, 0) < 0)
+               goto fail_stopped;
+ 
+       sysemu_supported = 1;
+@@ -257,7 +265,7 @@ static void __init check_sysemu(void)
+       set_using_sysemu(!force_sysemu_disabled);
+ 
+       printk("Checking advanced syscall emulation patch for ptrace...");
+-      pid = start_ptraced_child();
++      pid = start_ptraced_child(&stack);
+       while(1){
+               count++;
+               if(ptrace(PTRACE_SYSEMU_SINGLESTEP, pid, 0, 0) < 0)
+@@ -282,7 +290,7 @@ static void __init check_sysemu(void)
+                       break;
+               }
+       }
+-      if (stop_ptraced_child(pid, 0, 0) < 0)
++      if (stop_ptraced_child(pid, stack, 0, 0) < 0)
+               goto fail_stopped;
+ 
+       sysemu_supported = 2;
+@@ -293,17 +301,18 @@ static void __init check_sysemu(void)
+       return;
+ 
+ fail:
+-      stop_ptraced_child(pid, 1, 0);
++      stop_ptraced_child(pid, stack, 1, 0);
+ fail_stopped:
+       printk("missing\n");
+ }
+ 
+ void __init check_ptrace(void)
+ {
++      void *stack;
+       int pid, syscall, n, status;
+ 
+       printk("Checking that ptrace can change system call numbers...");
+-      pid = start_ptraced_child();
++      pid = start_ptraced_child(&stack);
+ 
+       if (ptrace(PTRACE_OLDSETOPTIONS, pid, 0, (void *)PTRACE_O_TRACESYSGOOD) < 0)
+               panic("check_ptrace: PTRACE_SETOPTIONS failed, errno = %d", errno);
+@@ -330,7 +339,7 @@ void __init check_ptrace(void)
+                       break;
+               }
+       }
+-      stop_ptraced_child(pid, 0, 1);
++      stop_ptraced_child(pid, stack, 0, 1);
+       printk("OK\n");
+       check_sysemu();
+ }
+@@ -362,10 +371,11 @@ void forward_pending_sigio(int target)
+ static inline int check_skas3_ptrace_support(void)
+ {
+       struct ptrace_faultinfo fi;
++      void *stack;
+       int pid, n, ret = 1;
+ 
+       printf("Checking for the skas3 patch in the host...");
+-      pid = start_ptraced_child();
++      pid = start_ptraced_child(&stack);
+ 
+       n = ptrace(PTRACE_FAULTINFO, pid, 0, &fi);
+       if (n < 0) {
+@@ -380,7 +390,7 @@ static inline int check_skas3_ptrace_sup
+       }
+ 
+       init_registers(pid);
+-      stop_ptraced_child(pid, 1, 1);
++      stop_ptraced_child(pid, stack, 1, 1);
+ 
+       return(ret);
+ }
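The UML hunks above share one theme: start_ptraced_child() now mmap()s the page used for the child's stack and hands it back through stack_out, and stop_ptraced_child() munmap()s that page after reaping the child, so each host-feature probe no longer leaks a stack page. A minimal userspace sketch of the same allocate/clone/reap/free lifecycle (names and error handling are illustrative, not UML's):

    /* Sketch: give a clone()d child an mmap()ed stack and free it
     * after waitpid(), mirroring the start/stop_ptraced_child pairing. */
    #define _GNU_SOURCE
    #include <sched.h>
    #include <signal.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <sys/wait.h>
    #include <unistd.h>

    static int child_fn(void *arg)
    {
        (void)arg;
        return 0;                /* child exits immediately */
    }

    int main(void)
    {
        long page = sysconf(_SC_PAGESIZE);
        void *stack = mmap(NULL, page, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (stack == MAP_FAILED) { perror("mmap"); return 1; }

        /* the stack grows down, so pass the top of the mapping */
        pid_t pid = clone(child_fn, (char *)stack + page, SIGCHLD, NULL);
        if (pid < 0) { perror("clone"); return 1; }

        if (waitpid(pid, NULL, 0) < 0) { perror("waitpid"); return 1; }
        if (munmap(stack, page) < 0) { perror("munmap"); return 1; }
        return 0;
    }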
+diff --git a/arch/x86_64/ia32/syscall32.c b/arch/x86_64/ia32/syscall32.c
+--- a/arch/x86_64/ia32/syscall32.c
++++ b/arch/x86_64/ia32/syscall32.c
+@@ -57,6 +57,7 @@ int syscall32_setup_pages(struct linux_b
+       int npages = (VSYSCALL32_END - VSYSCALL32_BASE) >> PAGE_SHIFT;
+       struct vm_area_struct *vma;
+       struct mm_struct *mm = current->mm;
++      int ret;
+ 
+       vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+       if (!vma)
+@@ -78,7 +79,11 @@ int syscall32_setup_pages(struct linux_b
+       vma->vm_mm = mm;
+ 
+       down_write(&mm->mmap_sem);
+-      insert_vm_struct(mm, vma);
++      if ((ret = insert_vm_struct(mm, vma))) {
++              up_write(&mm->mmap_sem);
++              kmem_cache_free(vm_area_cachep, vma);
++              return ret;
++      }
+       mm->total_vm += npages;
+       up_write(&mm->mmap_sem);
+       return 0;
+diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c
+--- a/arch/x86_64/kernel/setup.c
++++ b/arch/x86_64/kernel/setup.c
+@@ -729,8 +729,6 @@ static void __init amd_detect_cmp(struct
+       int cpu = smp_processor_id();
+       int node = 0;
+       unsigned bits;
+-      if (c->x86_num_cores == 1)
+-              return;
+ 
+       bits = 0;
+       while ((1 << bits) < c->x86_num_cores)
+diff --git a/arch/x86_64/kernel/smp.c b/arch/x86_64/kernel/smp.c
+--- a/arch/x86_64/kernel/smp.c
++++ b/arch/x86_64/kernel/smp.c
+@@ -284,6 +284,71 @@ struct call_data_struct {
+ static struct call_data_struct * call_data;
+ 
+ /*
++ * this function sends a 'generic call function' IPI to one other CPU
++ * in the system.
++ */
++static void __smp_call_function_single (int cpu, void (*func) (void *info), void *info,
++                              int nonatomic, int wait)
++{
++      struct call_data_struct data;
++      int cpus = 1;
++
++      data.func = func;
++      data.info = info;
++      atomic_set(&data.started, 0);
++      data.wait = wait;
++      if (wait)
++              atomic_set(&data.finished, 0);
++
++      call_data = &data;
++      wmb();
++      /* Send a message to all other CPUs and wait for them to respond */
++      send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNCTION_VECTOR);
++
++      /* Wait for response */
++      while (atomic_read(&data.started) != cpus)
++              cpu_relax();
++
++      if (!wait)
++              return;
++
++      while (atomic_read(&data.finished) != cpus)
++              cpu_relax();
++}
++
++/*
++ * Run a function on another CPU
++ *  <func>    The function to run. This must be fast and non-blocking.
++ *  <info>    An arbitrary pointer to pass to the function.
++ *  <nonatomic>       Currently unused.
++ *  <wait>    If true, wait until function has completed on other CPUs.
++ *  [RETURNS]   0 on success, else a negative status code.
++ *
++ * Does not return until the remote CPU is nearly ready to execute <func>
++ * or is or has executed.
++ */
++
++int smp_call_function_single (int cpu, void (*func) (void *info), void *info, 
++      int nonatomic, int wait)
++{
++      
++      int me = get_cpu(); /* prevent preemption and reschedule on another processor */
++
++      if (cpu == me) {
++              printk("%s: trying to call self\n", __func__);
++              put_cpu();
++              return -EBUSY;
++      }
++      spin_lock_bh(&call_lock);
++
++      __smp_call_function_single(cpu, func,info,nonatomic,wait);      
++
++      spin_unlock_bh(&call_lock);
++      put_cpu();
++      return 0;
++}
++
++/*
+  * this function sends a 'generic call function' IPI to all other CPUs
+  * in the system.
+  */
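The new __smp_call_function_single() above is a two-counter rendezvous: the caller publishes call_data, fires the IPI at exactly one CPU, then spins until the target bumps started (and, when wait is set, finished). A rough userspace analogue of that handshake using C11 atomics, with a thread standing in for the IPI target (an illustration of the pattern only, not kernel code):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    struct call_data {
        void (*func)(void *info);
        void *info;
        atomic_int started;      /* target is about to run func */
        atomic_int finished;     /* target has completed func */
        int wait;
    };

    static struct call_data *call_data;   /* the "published" pointer */

    static void *ipi_target(void *unused)
    {
        (void)unused;
        struct call_data *d = call_data;
        atomic_fetch_add(&d->started, 1);
        d->func(d->info);
        atomic_fetch_add(&d->finished, 1);
        return NULL;
    }

    static void say_hello(void *info)
    {
        printf("running on target: %s\n", (const char *)info);
    }

    int main(void)
    {
        struct call_data d = { say_hello, (void *)"hello", 0, 0, 1 };
        pthread_t t;

        call_data = &d;                               /* wmb() in the kernel */
        pthread_create(&t, NULL, ipi_target, NULL);   /* the "IPI" */

        while (atomic_load(&d.started) != 1)          /* wait for response */
            ;
        if (d.wait)
            while (atomic_load(&d.finished) != 1)
                ;
        pthread_join(t, NULL);
        return 0;
    }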
+diff --git a/arch/x86_64/kernel/smpboot.c b/arch/x86_64/kernel/smpboot.c
+--- a/arch/x86_64/kernel/smpboot.c
++++ b/arch/x86_64/kernel/smpboot.c
+@@ -202,9 +202,6 @@ static __cpuinit void sync_master(void *
+ {
+       unsigned long flags, i;
+ 
+-      if (smp_processor_id() != boot_cpu_id)
+-              return;
+-
+       go[MASTER] = 0;
+ 
+       local_irq_save(flags);
+@@ -253,7 +250,7 @@ get_delta(long *rt, long *master)
+       return tcenter - best_tm;
+ }
+ 
+-static __cpuinit void sync_tsc(void)
++static __cpuinit void sync_tsc(unsigned int master)
+ {
+       int i, done = 0;
+       long delta, adj, adjust_latency = 0;
+@@ -267,9 +264,17 @@ static __cpuinit void sync_tsc(void)
+       } t[NUM_ROUNDS] __cpuinitdata;
+ #endif
+ 
++      printk(KERN_INFO "CPU %d: Syncing TSC to CPU %u.\n",
++              smp_processor_id(), master);
++
+       go[MASTER] = 1;
+ 
+-      smp_call_function(sync_master, NULL, 1, 0);
++      /* It is dangerous to broadcast IPI as cpus are coming up,
++       * as they may not be ready to accept them.  So since
++       * we only need to send the ipi to the boot cpu direct
++       * the message, and avoid the race.
++       */
++      smp_call_function_single(master, sync_master, NULL, 1, 0);
+ 
+       while (go[MASTER])      /* wait for master to be ready */
+               no_cpu_relax();
+@@ -313,16 +318,14 @@ static __cpuinit void sync_tsc(void)
+       printk(KERN_INFO
+              "CPU %d: synchronized TSC with CPU %u (last diff %ld cycles, "
+              "maxerr %lu cycles)\n",
+-             smp_processor_id(), boot_cpu_id, delta, rt);
++             smp_processor_id(), master, delta, rt);
+ }
+ 
+ static void __cpuinit tsc_sync_wait(void)
+ {
+       if (notscsync || !cpu_has_tsc)
+               return;
+-      printk(KERN_INFO "CPU %d: Syncing TSC to CPU %u.\n", smp_processor_id(),
+-                      boot_cpu_id);
+-      sync_tsc();
++      sync_tsc(0);
+ }
+ 
+ static __init int notscsync_setup(char *s)
+diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
+--- a/drivers/acpi/pci_irq.c
++++ b/drivers/acpi/pci_irq.c
+@@ -433,8 +433,9 @@ acpi_pci_irq_enable (
+               printk(KERN_WARNING PREFIX "PCI Interrupt %s[%c]: no GSI",
+                       pci_name(dev), ('A' + pin));
+               /* Interrupt Line values above 0xF are forbidden */
+-              if (dev->irq >= 0 && (dev->irq <= 0xF)) {
++              if (dev->irq > 0 && (dev->irq <= 0xF)) {
+                       printk(" - using IRQ %d\n", dev->irq);
++                      acpi_register_gsi(dev->irq, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW);
+                       return_VALUE(0);
+               }
+               else {
+diff --git a/drivers/char/rocket.c b/drivers/char/rocket.c
+--- a/drivers/char/rocket.c
++++ b/drivers/char/rocket.c
+@@ -277,7 +277,7 @@ static void rp_do_receive(struct r_port 
+               ToRecv = space;
+ 
+       if (ToRecv <= 0)
+-              return;
++              goto done;
+ 
+       /*
+        * if status indicates there are errored characters in the
+@@ -359,6 +359,7 @@ static void rp_do_receive(struct r_port 
+       }
+       /*  Push the data up to the tty layer */
+       ld->receive_buf(tty, tty->flip.char_buf, tty->flip.flag_buf, count);
++done:
+       tty_ldisc_deref(ld);
+ }
+ 
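The rp_do_receive() hunk above fixes a reference leak rather than a crash: the early return on ToRecv <= 0 used to skip the tty_ldisc_deref(ld) at the end of the function, leaving the line-discipline reference count permanently elevated; funnelling the exit through the new done: label drops the reference on every path.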
+diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
+--- a/drivers/char/tpm/tpm.c
++++ b/drivers/char/tpm/tpm.c
+@@ -32,12 +32,6 @@
+ 
+ #define       TPM_BUFSIZE                     2048
+ 
+-/* PCI configuration addresses */
+-#define       PCI_GEN_PMCON_1                 0xA0
+-#define       PCI_GEN1_DEC                    0xE4
+-#define       PCI_LPC_EN                      0xE6
+-#define       PCI_GEN2_DEC                    0xEC
+-
+ static LIST_HEAD(tpm_chip_list);
+ static DEFINE_SPINLOCK(driver_lock);
+ static int dev_mask[32];
+@@ -61,72 +55,6 @@ void tpm_time_expired(unsigned long ptr)
+ EXPORT_SYMBOL_GPL(tpm_time_expired);
+ 
+ /*
+- * Initialize the LPC bus and enable the TPM ports
+- */
+-int tpm_lpc_bus_init(struct pci_dev *pci_dev, u16 base)
+-{
+-      u32 lpcenable, tmp;
+-      int is_lpcm = 0;
+-
+-      switch (pci_dev->vendor) {
+-      case PCI_VENDOR_ID_INTEL:
+-              switch (pci_dev->device) {
+-              case PCI_DEVICE_ID_INTEL_82801CA_12:
+-              case PCI_DEVICE_ID_INTEL_82801DB_12:
+-                      is_lpcm = 1;
+-                      break;
+-              }
+-              /* init ICH (enable LPC) */
+-              pci_read_config_dword(pci_dev, PCI_GEN1_DEC, &lpcenable);
+-              lpcenable |= 0x20000000;
+-              pci_write_config_dword(pci_dev, PCI_GEN1_DEC, lpcenable);
+-
+-              if (is_lpcm) {
+-                      pci_read_config_dword(pci_dev, PCI_GEN1_DEC,
+-                                            &lpcenable);
+-                      if ((lpcenable & 0x20000000) == 0) {
+-                              dev_err(&pci_dev->dev,
+-                                      "cannot enable LPC\n");
+-                              return -ENODEV;
+-                      }
+-              }
+-
+-              /* initialize TPM registers */
+-              pci_read_config_dword(pci_dev, PCI_GEN2_DEC, &tmp);
+-
+-              if (!is_lpcm)
+-                      tmp = (tmp & 0xFFFF0000) | (base & 0xFFF0);
+-              else
+-                      tmp =
+-                          (tmp & 0xFFFF0000) | (base & 0xFFF0) |
+-                          0x00000001;
+-
+-              pci_write_config_dword(pci_dev, PCI_GEN2_DEC, tmp);
+-
+-              if (is_lpcm) {
+-                      pci_read_config_dword(pci_dev, PCI_GEN_PMCON_1,
+-                                            &tmp);
+-                      tmp |= 0x00000004;      /* enable CLKRUN */
+-                      pci_write_config_dword(pci_dev, PCI_GEN_PMCON_1,
+-                                             tmp);
+-              }
+-              tpm_write_index(0x0D, 0x55);    /* unlock 4F */
+-              tpm_write_index(0x0A, 0x00);    /* int disable */
+-              tpm_write_index(0x08, base);    /* base addr lo */
+-      tpm_write_index(0x09, (base & 0xFF00) >> 8);    /* base addr hi */
+-              tpm_write_index(0x0D, 0xAA);    /* lock 4F */
+-              break;
+-      case PCI_VENDOR_ID_AMD:
+-              /* nothing yet */
+-              break;
+-      }
+-
+-      return 0;
+-}
+-
+-EXPORT_SYMBOL_GPL(tpm_lpc_bus_init);
+-
+-/*
+  * Internal kernel interface to transmit TPM commands
+  */
+ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
+@@ -590,10 +518,6 @@ int tpm_pm_resume(struct pci_dev *pci_de
+       if (chip == NULL)
+               return -ENODEV;
+ 
+-      spin_lock(&driver_lock);
+-      tpm_lpc_bus_init(pci_dev, chip->vendor->base);
+-      spin_unlock(&driver_lock);
+-
+       return 0;
+ }
+ 
+diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
+--- a/drivers/char/tpm/tpm.h
++++ b/drivers/char/tpm/tpm.h
+@@ -79,8 +79,6 @@ static inline void tpm_write_index(int i
+ }
+ 
+ extern void tpm_time_expired(unsigned long);
+-extern int tpm_lpc_bus_init(struct pci_dev *, u16);
+-
+ extern int tpm_register_hardware(struct pci_dev *,
+                                struct tpm_vendor_specific *);
+ extern int tpm_open(struct inode *, struct file *);
+diff --git a/drivers/char/tpm/tpm_atmel.c b/drivers/char/tpm/tpm_atmel.c
+--- a/drivers/char/tpm/tpm_atmel.c
++++ b/drivers/char/tpm/tpm_atmel.c
+@@ -22,7 +22,10 @@
+ #include "tpm.h"
+ 
+ /* Atmel definitions */
+-#define       TPM_ATML_BASE                   0x400
++enum tpm_atmel_addr {
++      TPM_ATMEL_BASE_ADDR_LO = 0x08,
++      TPM_ATMEL_BASE_ADDR_HI = 0x09
++};
+ 
+ /* write status bits */
+ #define       ATML_STATUS_ABORT               0x01
+@@ -127,7 +130,6 @@ static struct tpm_vendor_specific tpm_at
+       .cancel = tpm_atml_cancel,
+       .req_complete_mask = ATML_STATUS_BUSY | ATML_STATUS_DATA_AVAIL,
+       .req_complete_val = ATML_STATUS_DATA_AVAIL,
+-      .base = TPM_ATML_BASE,
+       .miscdev = { .fops = &atmel_ops, },
+ };
+ 
+@@ -136,14 +138,16 @@ static int __devinit tpm_atml_init(struc
+ {
+       u8 version[4];
+       int rc = 0;
++      int lo, hi;
+ 
+       if (pci_enable_device(pci_dev))
+               return -EIO;
+ 
+-      if (tpm_lpc_bus_init(pci_dev, TPM_ATML_BASE)) {
+-              rc = -ENODEV;
+-              goto out_err;
+-      }
++      lo = tpm_read_index( TPM_ATMEL_BASE_ADDR_LO );
++      hi = tpm_read_index( TPM_ATMEL_BASE_ADDR_HI );
++
++      tpm_atmel.base = (hi<<8)|lo;
++      dev_dbg( &pci_dev->dev, "Operating with base: 0x%x\n", tpm_atmel.base);
+ 
+       /* verify that it is an Atmel part */
+       if (tpm_read_index(4) != 'A' || tpm_read_index(5) != 'T'
+diff --git a/drivers/char/tpm/tpm_nsc.c b/drivers/char/tpm/tpm_nsc.c
+--- a/drivers/char/tpm/tpm_nsc.c
++++ b/drivers/char/tpm/tpm_nsc.c
+@@ -24,6 +24,10 @@
+ /* National definitions */
+ #define       TPM_NSC_BASE                    0x360
+ #define       TPM_NSC_IRQ                     0x07
++#define       TPM_NSC_BASE0_HI                0x60
++#define       TPM_NSC_BASE0_LO                0x61
++#define       TPM_NSC_BASE1_HI                0x62
++#define       TPM_NSC_BASE1_LO                0x63
+ 
+ #define       NSC_LDN_INDEX                   0x07
+ #define       NSC_SID_INDEX                   0x20
+@@ -234,7 +238,6 @@ static struct tpm_vendor_specific tpm_ns
+       .cancel = tpm_nsc_cancel,
+       .req_complete_mask = NSC_STATUS_OBF,
+       .req_complete_val = NSC_STATUS_OBF,
+-      .base = TPM_NSC_BASE,
+       .miscdev = { .fops = &nsc_ops, },
+       
+ };
+@@ -243,15 +246,16 @@ static int __devinit tpm_nsc_init(struct
+                                 const struct pci_device_id *pci_id)
+ {
+       int rc = 0;
++      int lo, hi;
++
++      hi = tpm_read_index(TPM_NSC_BASE0_HI);
++      lo = tpm_read_index(TPM_NSC_BASE0_LO);
++
++      tpm_nsc.base = (hi<<8) | lo;
+ 
+       if (pci_enable_device(pci_dev))
+               return -EIO;
+ 
+-      if (tpm_lpc_bus_init(pci_dev, TPM_NSC_BASE)) {
+-              rc = -ENODEV;
+-              goto out_err;
+-      }
+-
+       /* verify that it is a National part (SID) */
+       if (tpm_read_index(NSC_SID_INDEX) != 0xEF) {
+               rc = -ENODEV;
+diff --git a/drivers/char/tty_ioctl.c b/drivers/char/tty_ioctl.c
+--- a/drivers/char/tty_ioctl.c
++++ b/drivers/char/tty_ioctl.c
+@@ -476,11 +476,11 @@ int n_tty_ioctl(struct tty_struct * tty,
+                       ld = tty_ldisc_ref(tty);
+                       switch (arg) {
+                       case TCIFLUSH:
+-                              if (ld->flush_buffer)
++                              if (ld && ld->flush_buffer)
+                                       ld->flush_buffer(tty);
+                               break;
+                       case TCIOFLUSH:
+-                              if (ld->flush_buffer)
++                              if (ld && ld->flush_buffer)
+                                       ld->flush_buffer(tty);
+                               /* fall through */
+                       case TCOFLUSH:
+diff --git a/drivers/media/video/cx88/cx88-video.c b/drivers/media/video/cx88/cx88-video.c
+--- a/drivers/media/video/cx88/cx88-video.c
++++ b/drivers/media/video/cx88/cx88-video.c
+@@ -261,7 +261,7 @@ static struct cx88_ctrl cx8800_ctls[] = 
+                       .default_value = 0,
+                       .type          = V4L2_CTRL_TYPE_INTEGER,
+               },
+-              .off                   = 0,
++              .off                   = 128,
+               .reg                   = MO_HUE,
+               .mask                  = 0x00ff,
+               .shift                 = 0,
+diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
+--- a/drivers/net/e1000/e1000_main.c
++++ b/drivers/net/e1000/e1000_main.c
+@@ -2307,6 +2307,7 @@ e1000_xmit_frame(struct sk_buff *skb, st
+       tso = e1000_tso(adapter, skb);
+       if (tso < 0) {
+               dev_kfree_skb_any(skb);
++              spin_unlock_irqrestore(&adapter->tx_lock, flags);
+               return NETDEV_TX_OK;
+       }
+ 
+diff --git a/drivers/net/hamradio/Kconfig b/drivers/net/hamradio/Kconfig
+--- a/drivers/net/hamradio/Kconfig
++++ b/drivers/net/hamradio/Kconfig
+@@ -17,7 +17,7 @@ config MKISS
+ 
+ config 6PACK
+       tristate "Serial port 6PACK driver"
+-      depends on AX25 && BROKEN_ON_SMP
++      depends on AX25
+       ---help---
+         6pack is a transmission protocol for the data exchange between your
+         PC and your TNC (the Terminal Node Controller acts as a kind of
+diff --git a/drivers/net/shaper.c b/drivers/net/shaper.c
+--- a/drivers/net/shaper.c
++++ b/drivers/net/shaper.c
+@@ -135,10 +135,8 @@ static int shaper_start_xmit(struct sk_b
+ {
+       struct shaper *shaper = dev->priv;
+       struct sk_buff *ptr;
+-   
+-      if (down_trylock(&shaper->sem))
+-              return -1;
+ 
++      spin_lock(&shaper->lock);
+       ptr=shaper->sendq.prev;
+       
+       /*
+@@ -232,7 +230,7 @@ static int shaper_start_xmit(struct sk_b
+                 shaper->stats.collisions++;
+       }
+       shaper_kick(shaper);
+-      up(&shaper->sem);
++      spin_unlock(&shaper->lock);
+       return 0;
+ }
+ 
+@@ -271,11 +269,9 @@ static void shaper_timer(unsigned long d
+ {
+       struct shaper *shaper = (struct shaper *)data;
+ 
+-      if (!down_trylock(&shaper->sem)) {
+-              shaper_kick(shaper);
+-              up(&shaper->sem);
+-      } else
+-              mod_timer(&shaper->timer, jiffies);
++      spin_lock(&shaper->lock);
++      shaper_kick(shaper);
++      spin_unlock(&shaper->lock);
+ }
+ 
+ /*
+@@ -332,21 +328,6 @@ static void shaper_kick(struct shaper *s
+ 
+ 
+ /*
+- *    Flush the shaper queues on a closedown
+- */
+- 
+-static void shaper_flush(struct shaper *shaper)
+-{
+-      struct sk_buff *skb;
+-
+-      down(&shaper->sem);
+-      while((skb=skb_dequeue(&shaper->sendq))!=NULL)
+-              dev_kfree_skb(skb);
+-      shaper_kick(shaper);
+-      up(&shaper->sem);
+-}
+-
+-/*
+  *    Bring the interface up. We just disallow this until a 
+  *    bind.
+  */
+@@ -375,7 +356,15 @@ static int shaper_open(struct net_device
+ static int shaper_close(struct net_device *dev)
+ {
+       struct shaper *shaper=dev->priv;
+-      shaper_flush(shaper);
++      struct sk_buff *skb;
++
++      while ((skb = skb_dequeue(&shaper->sendq)) != NULL)
++              dev_kfree_skb(skb);
++
++      spin_lock_bh(&shaper->lock);
++      shaper_kick(shaper);
++      spin_unlock_bh(&shaper->lock);
++
+       del_timer_sync(&shaper->timer);
+       return 0;
+ }
+@@ -576,6 +565,7 @@ static void shaper_init_priv(struct net_
+       init_timer(&sh->timer);
+       sh->timer.function=shaper_timer;
+       sh->timer.data=(unsigned long)sh;
++      spin_lock_init(&sh->lock);
+ }
+ 
+ /*
+diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
+--- a/drivers/pci/pci-driver.c
++++ b/drivers/pci/pci-driver.c
+@@ -396,7 +396,7 @@ int pci_register_driver(struct pci_drive
+       /* FIXME, once all of the existing PCI drivers have been fixed to set
+        * the pci shutdown function, this test can go away. */
+       if (!drv->driver.shutdown)
+-              drv->driver.shutdown = pci_device_shutdown,
++              drv->driver.shutdown = pci_device_shutdown;
+       drv->driver.owner = drv->owner;
+       drv->driver.kobj.ktype = &pci_driver_kobj_type;
+       pci_init_dynids(&drv->dynids);
+diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
+--- a/drivers/scsi/qla2xxx/qla_init.c
++++ b/drivers/scsi/qla2xxx/qla_init.c
+@@ -1914,9 +1914,11 @@ qla2x00_reg_remote_port(scsi_qla_host_t 
+               rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
+ 
+       fcport->rport = rport = fc_remote_port_add(ha->host, 0, &rport_ids);
+-      if (!rport)
++      if (!rport) {
+               qla_printk(KERN_WARNING, ha,
+                   "Unable to allocate fc remote port!\n");
++              return;
++      }
+ 
+       if (rport->scsi_target_id != -1 && rport->scsi_target_id < MAX_TARGETS)
+               fcport->os_target_id = rport->scsi_target_id;
+diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
+--- a/drivers/scsi/qla2xxx/qla_os.c
++++ b/drivers/scsi/qla2xxx/qla_os.c
+@@ -1150,7 +1150,7 @@ iospace_error_exit:
+  */
+ int qla2x00_probe_one(struct pci_dev *pdev, struct qla_board_info *brd_info)
+ {
+-      int     ret;
++      int     ret = -ENODEV;
+       device_reg_t __iomem *reg;
+       struct Scsi_Host *host;
+       scsi_qla_host_t *ha;
+@@ -1161,7 +1161,7 @@ int qla2x00_probe_one(struct pci_dev *pd
+       fc_port_t *fcport;
+ 
+       if (pci_enable_device(pdev))
+-              return -1;
++              goto probe_out;
+ 
+       host = scsi_host_alloc(&qla2x00_driver_template,
+           sizeof(scsi_qla_host_t));
+@@ -1183,9 +1183,8 @@ int qla2x00_probe_one(struct pci_dev *pd
+ 
+       /* Configure PCI I/O space */
+       ret = qla2x00_iospace_config(ha);
+-      if (ret != 0) {
+-              goto probe_alloc_failed;
+-      }
++      if (ret)
++              goto probe_failed;
+ 
+       /* Sanitize the information from PCI BIOS. */
+       host->irq = pdev->irq;
+@@ -1258,23 +1257,10 @@ int qla2x00_probe_one(struct pci_dev *pd
+               qla_printk(KERN_WARNING, ha,
+                   "[ERROR] Failed to allocate memory for adapter\n");
+ 
+-              goto probe_alloc_failed;
++              ret = -ENOMEM;
++              goto probe_failed;
+       }
+ 
+-      pci_set_drvdata(pdev, ha);
+-      host->this_id = 255;
+-      host->cmd_per_lun = 3;
+-      host->unique_id = ha->instance;
+-      host->max_cmd_len = MAX_CMDSZ;
+-      host->max_channel = ha->ports - 1;
+-      host->max_id = ha->max_targets;
+-      host->max_lun = ha->max_luns;
+-      host->transportt = qla2xxx_transport_template;
+-      if (scsi_add_host(host, &pdev->dev))
+-              goto probe_alloc_failed;
+-
+-      qla2x00_alloc_sysfs_attr(ha);
+-
+       if (qla2x00_initialize_adapter(ha) &&
+           !(ha->device_flags & DFLG_NO_CABLE)) {
+ 
+@@ -1285,11 +1271,10 @@ int qla2x00_probe_one(struct pci_dev *pd
+                   "Adapter flags %x.\n",
+                   ha->host_no, ha->device_flags));
+ 
++              ret = -ENODEV;
+               goto probe_failed;
+       }
+ 
+-      qla2x00_init_host_attr(ha);
+-
+       /*
+        * Startup the kernel thread for this host adapter
+        */
+@@ -1299,17 +1284,26 @@ int qla2x00_probe_one(struct pci_dev *pd
+               qla_printk(KERN_WARNING, ha,
+                   "Unable to start DPC thread!\n");
+ 
++              ret = -ENODEV;
+               goto probe_failed;
+       }
+       wait_for_completion(&ha->dpc_inited);
+ 
++      host->this_id = 255;
++      host->cmd_per_lun = 3;
++      host->unique_id = ha->instance;
++      host->max_cmd_len = MAX_CMDSZ;
++      host->max_channel = ha->ports - 1;
++      host->max_lun = MAX_LUNS;
++      host->transportt = qla2xxx_transport_template;
++
+       if (IS_QLA2100(ha) || IS_QLA2200(ha))
+               ret = request_irq(host->irq, qla2100_intr_handler,
+                   SA_INTERRUPT|SA_SHIRQ, ha->brd_info->drv_name, ha);
+       else
+               ret = request_irq(host->irq, qla2300_intr_handler,
+                   SA_INTERRUPT|SA_SHIRQ, ha->brd_info->drv_name, ha);
+-      if (ret != 0) {
++      if (ret) {
+               qla_printk(KERN_WARNING, ha,
+                   "Failed to reserve interrupt %d already in use.\n",
+                   host->irq);
+@@ -1363,9 +1357,18 @@ int qla2x00_probe_one(struct pci_dev *pd
+               msleep(10);
+       }
+ 
++      pci_set_drvdata(pdev, ha);
+       ha->flags.init_done = 1;
+       num_hosts++;
+ 
++      ret = scsi_add_host(host, &pdev->dev);
++      if (ret)
++              goto probe_failed;
++
++      qla2x00_alloc_sysfs_attr(ha);
++
++      qla2x00_init_host_attr(ha);
++
+       qla_printk(KERN_INFO, ha, "\n"
+           " QLogic Fibre Channel HBA Driver: %s\n"
+           "  QLogic %s - %s\n"
+@@ -1384,9 +1387,6 @@ int qla2x00_probe_one(struct pci_dev *pd
+ probe_failed:
+       fc_remove_host(ha->host);
+ 
+-      scsi_remove_host(host);
+-
+-probe_alloc_failed:
+       qla2x00_free_device(ha);
+ 
+       scsi_host_put(host);
+@@ -1394,7 +1394,8 @@ probe_alloc_failed:
+ probe_disable_device:
+       pci_disable_device(pdev);
+ 
+-      return -1;
++probe_out:
++      return ret;
+ }
+ EXPORT_SYMBOL_GPL(qla2x00_probe_one);
+ 
+diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
+--- a/drivers/scsi/sg.c
++++ b/drivers/scsi/sg.c
+@@ -2969,23 +2969,22 @@ static void * dev_seq_start(struct seq_f
+ {
+       struct sg_proc_deviter * it = kmalloc(sizeof(*it), GFP_KERNEL);
+ 
++      s->private = it;
+       if (! it)
+               return NULL;
++
+       if (NULL == sg_dev_arr)
+-              goto err1;
++              return NULL;
+       it->index = *pos;
+       it->max = sg_last_dev();
+       if (it->index >= it->max)
+-              goto err1;
++              return NULL;
+       return it;
+-err1:
+-      kfree(it);
+-      return NULL;
+ }
+ 
+ static void * dev_seq_next(struct seq_file *s, void *v, loff_t *pos)
+ {
+-      struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
++      struct sg_proc_deviter * it = s->private;
+ 
+       *pos = ++it->index;
+       return (it->index < it->max) ? it : NULL;
+@@ -2993,7 +2992,9 @@ static void * dev_seq_next(struct seq_fi
+ 
+ static void dev_seq_stop(struct seq_file *s, void *v)
+ {
+-      kfree (v);
++      struct sg_proc_deviter * it = s->private;
++
++      kfree (it);
+ }
+ 
+ static int sg_proc_open_dev(struct inode *inode, struct file *file)
+diff --git a/drivers/usb/net/usbnet.c b/drivers/usb/net/usbnet.c
+--- a/drivers/usb/net/usbnet.c
++++ b/drivers/usb/net/usbnet.c
+@@ -1922,7 +1922,7 @@ static int genelink_rx_fixup (struct usb
+ 
+                       // copy the packet data to the new skb
+                       memcpy(skb_put(gl_skb, size), packet->packet_data, size);
+-                      skb_return (dev, skb);
++                      skb_return (dev, gl_skb);
+               }
+ 
+               // advance to the next packet
+diff --git a/fs/bio.c b/fs/bio.c
+--- a/fs/bio.c
++++ b/fs/bio.c
+@@ -261,6 +261,7 @@ inline void __bio_clone(struct bio *bio,
+        */
+       bio->bi_vcnt = bio_src->bi_vcnt;
+       bio->bi_size = bio_src->bi_size;
++      bio->bi_idx = bio_src->bi_idx;
+       bio_phys_segments(q, bio);
+       bio_hw_segments(q, bio);
+ }
+diff --git a/fs/char_dev.c b/fs/char_dev.c
+--- a/fs/char_dev.c
++++ b/fs/char_dev.c
+@@ -139,7 +139,7 @@ __unregister_chrdev_region(unsigned majo
+       struct char_device_struct *cd = NULL, **cp;
+       int i = major_to_index(major);
+ 
+-      up(&chrdevs_lock);
++      down(&chrdevs_lock);
+       for (cp = &chrdevs[i]; *cp; cp = &(*cp)->next)
+               if ((*cp)->major == major &&
+                   (*cp)->baseminor == baseminor &&
+diff --git a/fs/exec.c b/fs/exec.c
+--- a/fs/exec.c
++++ b/fs/exec.c
+@@ -649,6 +649,7 @@ static inline int de_thread(struct task_
+       }
+       sig->group_exit_task = NULL;
+       sig->notify_count = 0;
++      sig->real_timer.data = (unsigned long)current;
+       spin_unlock_irq(lock);
+ 
+       /*
+diff --git a/fs/isofs/compress.c b/fs/isofs/compress.c
+--- a/fs/isofs/compress.c
++++ b/fs/isofs/compress.c
+@@ -129,8 +129,14 @@ static int zisofs_readpage(struct file *
+       cend = le32_to_cpu(*(__le32 *)(bh->b_data + (blockendptr & bufmask)));
+       brelse(bh);
+ 
++      if (cstart > cend)
++              goto eio;
++              
+       csize = cend-cstart;
+ 
++      if (csize > deflateBound(1UL << zisofs_block_shift))
++              goto eio;
++
+       /* Now page[] contains an array of pages, any of which can be NULL,
+          and the locks on which we hold.  We should now read the data and
+          release the pages.  If the pages are NULL the decompressed data
+diff --git a/include/asm-i386/string.h b/include/asm-i386/string.h
+--- a/include/asm-i386/string.h
++++ b/include/asm-i386/string.h
+@@ -116,7 +116,8 @@ __asm__ __volatile__(
+       "orb $1,%%al\n"
+       "3:"
+       :"=a" (__res), "=&S" (d0), "=&D" (d1)
+-                   :"1" (cs),"2" (ct));
++      :"1" (cs),"2" (ct)
++      :"memory");
+ return __res;
+ }
+ 
+@@ -138,8 +139,9 @@ __asm__ __volatile__(
+       "3:\tsbbl %%eax,%%eax\n\t"
+       "orb $1,%%al\n"
+       "4:"
+-                   :"=a" (__res), "=&S" (d0), "=&D" (d1), "=&c" (d2)
+-                   :"1" (cs),"2" (ct),"3" (count));
++      :"=a" (__res), "=&S" (d0), "=&D" (d1), "=&c" (d2)
++      :"1" (cs),"2" (ct),"3" (count)
++      :"memory");
+ return __res;
+ }
+ 
+@@ -158,7 +160,9 @@ __asm__ __volatile__(
+       "movl $1,%1\n"
+       "2:\tmovl %1,%0\n\t"
+       "decl %0"
+-      :"=a" (__res), "=&S" (d0) : "1" (s),"0" (c));
++      :"=a" (__res), "=&S" (d0)
++      :"1" (s),"0" (c)
++      :"memory");
+ return __res;
+ }
+ 
+@@ -175,7 +179,9 @@ __asm__ __volatile__(
+       "leal -1(%%esi),%0\n"
+       "2:\ttestb %%al,%%al\n\t"
+       "jne 1b"
+-      :"=g" (__res), "=&S" (d0), "=&a" (d1) :"0" (0),"1" (s),"2" (c));
++      :"=g" (__res), "=&S" (d0), "=&a" (d1)
++      :"0" (0),"1" (s),"2" (c)
++      :"memory");
+ return __res;
+ }
+ 
+@@ -189,7 +195,9 @@ __asm__ __volatile__(
+       "scasb\n\t"
+       "notl %0\n\t"
+       "decl %0"
+-      :"=c" (__res), "=&D" (d0) :"1" (s),"a" (0), "0" (0xffffffffu));
++      :"=c" (__res), "=&D" (d0)
++      :"1" (s),"a" (0), "0" (0xffffffffu)
++      :"memory");
+ return __res;
+ }
+ 
+@@ -333,7 +341,9 @@ __asm__ __volatile__(
+       "je 1f\n\t"
+       "movl $1,%0\n"
+       "1:\tdecl %0"
+-      :"=D" (__res), "=&c" (d0) : "a" (c),"0" (cs),"1" (count));
++      :"=D" (__res), "=&c" (d0)
++      :"a" (c),"0" (cs),"1" (count)
++      :"memory");
+ return __res;
+ }
+ 
+@@ -369,7 +379,7 @@ __asm__ __volatile__(
+       "je 2f\n\t"
+       "stosb\n"
+       "2:"
+-      : "=&c" (d0), "=&D" (d1)
++      :"=&c" (d0), "=&D" (d1)
+       :"a" (c), "q" (count), "0" (count/4), "1" ((long) s)
+       :"memory");
+ return (s);   
+@@ -392,7 +402,8 @@ __asm__ __volatile__(
+       "jne 1b\n"
+       "3:\tsubl %2,%0"
+       :"=a" (__res), "=&d" (d0)
+-      :"c" (s),"1" (count));
++      :"c" (s),"1" (count)
++      :"memory");
+ return __res;
+ }
+ /* end of additional stuff */
+@@ -473,7 +484,8 @@ static inline void * memscan(void * addr
+               "dec %%edi\n"
+               "1:"
+               : "=D" (addr), "=c" (size)
+-              : "0" (addr), "1" (size), "a" (c));
++              : "0" (addr), "1" (size), "a" (c)
++              : "memory");
+       return addr;
+ }
+ 
+diff --git a/include/asm-x86_64/smp.h b/include/asm-x86_64/smp.h
+--- a/include/asm-x86_64/smp.h
++++ b/include/asm-x86_64/smp.h
+@@ -46,6 +46,8 @@ extern int pic_mode;
+ extern int smp_num_siblings;
+ extern void smp_flush_tlb(void);
+ extern void smp_message_irq(int cpl, void *dev_id, struct pt_regs *regs);
++extern int smp_call_function_single (int cpuid, void (*func) (void *info), void *info,
++                                   int retry, int wait);
+ extern void smp_send_reschedule(int cpu);
+ extern void smp_invalidate_rcv(void);         /* Process an NMI */
+ extern void zap_low_mappings(void);
+diff --git a/include/linux/if_shaper.h b/include/linux/if_shaper.h
+--- a/include/linux/if_shaper.h
++++ b/include/linux/if_shaper.h
+@@ -23,7 +23,7 @@ struct shaper
+       __u32 shapeclock;
+       unsigned long recovery; /* Time we can next clock a packet out on
+                                  an empty queue */
+-      struct semaphore sem;
++      spinlock_t lock;
+         struct net_device_stats stats;
+       struct net_device *dev;
+       int  (*hard_start_xmit) (struct sk_buff *skb,
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -1192,7 +1192,7 @@ static inline void *skb_header_pointer(c
+ {
+       int hlen = skb_headlen(skb);
+ 
+-      if (offset + len <= hlen)
++      if (hlen - offset >= len)
+               return skb->data + offset;
+ 
+       if (skb_copy_bits(skb, offset, buffer, len) < 0)
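The skb_header_pointer() change is overflow hardening: with a huge len, offset + len can wrap and compare as small, slipping past the old bounds check, while hlen - offset keeps the arithmetic on values the kernel controls. A standalone demonstration in unsigned arithmetic (the numbers are invented for illustration):

    #include <stdio.h>

    int main(void)
    {
        unsigned int hlen = 100, offset = 40;
        unsigned int len = 0xffffffe0u;      /* hostile length */

        /* old test: offset + len wraps around to 8, and 8 <= 100 */
        printf("old check: %s\n",
               offset + len <= hlen ? "accepted (bad)" : "rejected");

        /* new test: 60 >= 0xffffffe0 is false, so access is refused */
        printf("new check: %s\n",
               hlen - offset >= len ? "accepted" : "rejected (good)");
        return 0;
    }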
+diff --git a/include/linux/zlib.h b/include/linux/zlib.h
+--- a/include/linux/zlib.h
++++ b/include/linux/zlib.h
+@@ -506,6 +506,11 @@ extern int zlib_deflateReset (z_streamp 
+    stream state was inconsistent (such as zalloc or state being NULL).
+ */
+ 
++static inline unsigned long deflateBound(unsigned long s)
++{
++      return s + ((s + 7) >> 3) + ((s + 63) >> 6) + 11;
++}
++
+ extern int zlib_deflateParams (z_streamp strm, int level, int strategy);
+ /*
+      Dynamically update the compression level and compression strategy.  The
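The deflateBound() helper added above is a worst-case size for deflate output on s input bytes, roughly s + s/8 + s/64 + 11; the isofs hunk earlier in this patch uses it to reject a compressed-extent size no genuine deflate stream for one block could reach. A quick standalone check of the arithmetic (the 64 KiB block size is assumed purely for illustration):

    #include <stdio.h>

    static unsigned long deflateBound(unsigned long s)
    {
        return s + ((s + 7) >> 3) + ((s + 63) >> 6) + 11;
    }

    int main(void)
    {
        unsigned long s = 1UL << 16;          /* 65536-byte block */
        /* 65536 + 8192 + 1024 + 11 = 74763 */
        printf("deflateBound(%lu) = %lu\n", s, deflateBound(s));
        return 0;
    }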
+diff --git a/kernel/module.c b/kernel/module.c
+--- a/kernel/module.c
++++ b/kernel/module.c
+@@ -249,13 +249,18 @@ static inline unsigned int block_size(in
+ /* Created by linker magic */
+ extern char __per_cpu_start[], __per_cpu_end[];
+ 
+-static void *percpu_modalloc(unsigned long size, unsigned long align)
++static void *percpu_modalloc(unsigned long size, unsigned long align,
++                           const char *name)
+ {
+       unsigned long extra;
+       unsigned int i;
+       void *ptr;
+ 
+-      BUG_ON(align > SMP_CACHE_BYTES);
++      if (align > SMP_CACHE_BYTES) {
++              printk(KERN_WARNING "%s: per-cpu alignment %li > %i\n",
++                     name, align, SMP_CACHE_BYTES);
++              align = SMP_CACHE_BYTES;
++      }
+ 
+       ptr = __per_cpu_start;
+       for (i = 0; i < pcpu_num_used; ptr += block_size(pcpu_size[i]), i++) {
+@@ -347,7 +352,8 @@ static int percpu_modinit(void)
+ }     
+ __initcall(percpu_modinit);
+ #else /* ... !CONFIG_SMP */
+-static inline void *percpu_modalloc(unsigned long size, unsigned long align)
++static inline void *percpu_modalloc(unsigned long size, unsigned long align,
++                                  const char *name)
+ {
+       return NULL;
+ }
+@@ -1554,7 +1560,8 @@ static struct module *load_module(void _
+       if (pcpuindex) {
+               /* We have a special allocation for this section. */
+               percpu = percpu_modalloc(sechdrs[pcpuindex].sh_size,
+-                                       sechdrs[pcpuindex].sh_addralign);
++                                       sechdrs[pcpuindex].sh_addralign,
++                                       mod->name);
+               if (!percpu) {
+                       err = -ENOMEM;
+                       goto free_mod;
+diff --git a/kernel/signal.c b/kernel/signal.c
+--- a/kernel/signal.c
++++ b/kernel/signal.c
+@@ -686,7 +686,7 @@ static void handle_stop_signal(int sig, 
+ {
+       struct task_struct *t;
+ 
+-      if (p->flags & SIGNAL_GROUP_EXIT)
++      if (p->signal->flags & SIGNAL_GROUP_EXIT)
+               /*
+                * The process is in the middle of dying already.
+                */
+diff --git a/lib/inflate.c b/lib/inflate.c
+--- a/lib/inflate.c
++++ b/lib/inflate.c
+@@ -326,7 +326,7 @@ DEBG("huft1 ");
+   {
+     *t = (struct huft *)NULL;
+     *m = 0;
+-    return 0;
++    return 2;
+   }
+ 
+ DEBG("huft2 ");
+@@ -374,6 +374,7 @@ DEBG("huft5 ");
+     if ((j = *p++) != 0)
+       v[x[j]++] = i;
+   } while (++i < n);
++  n = x[g];                   /* set n to length of v */
+ 
+ DEBG("h6 ");
+ 
+@@ -410,12 +411,13 @@ DEBG1("1 ");
+ DEBG1("2 ");
+           f -= a + 1;           /* deduct codes from patterns left */
+           xp = c + k;
+-          while (++j < z)       /* try smaller tables up to z bits */
+-          {
+-            if ((f <<= 1) <= *++xp)
+-              break;            /* enough codes to use up j bits */
+-            f -= *xp;           /* else deduct codes from patterns */
+-          }
++          if (j < z)
++            while (++j < z)       /* try smaller tables up to z bits */
++            {
++              if ((f <<= 1) <= *++xp)
++                break;            /* enough codes to use up j bits */
++              f -= *xp;           /* else deduct codes from patterns */
++            }
+         }
+ DEBG1("3 ");
+         z = 1 << j;             /* table entries for j-bit table */
+diff --git a/mm/memory.c b/mm/memory.c
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -1164,7 +1164,7 @@ int remap_pfn_range(struct vm_area_struc
+ {
+       pgd_t *pgd;
+       unsigned long next;
+-      unsigned long end = addr + size;
++      unsigned long end = addr + PAGE_ALIGN(size);
+       struct mm_struct *mm = vma->vm_mm;
+       int err;
+ 
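The remap_pfn_range() fix rounds the requested size up to a whole number of pages, so a caller passing a size that is not a page multiple still gets its final partial page mapped. The effect of PAGE_ALIGN(), sketched here with a hardcoded 4 KiB page size (the real macro comes from the per-arch page headers):

    #include <stdio.h>

    #define PAGE_SIZE      4096UL
    #define PAGE_ALIGN(x)  (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

    int main(void)
    {
        /* a 0x1234-byte request really spans two full pages */
        printf("PAGE_ALIGN(0x1234) = 0x%lx\n", PAGE_ALIGN(0x1234UL)); /* 0x2000 */
        printf("PAGE_ALIGN(0x2000) = 0x%lx\n", PAGE_ALIGN(0x2000UL)); /* 0x2000 */
        return 0;
    }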
+diff --git a/mm/mempolicy.c b/mm/mempolicy.c
+--- a/mm/mempolicy.c
++++ b/mm/mempolicy.c
+@@ -409,7 +409,7 @@ asmlinkage long sys_set_mempolicy(int mo
+       struct mempolicy *new;
+       DECLARE_BITMAP(nodes, MAX_NUMNODES);
+ 
+-      if (mode > MPOL_MAX)
++      if (mode < 0 || mode > MPOL_MAX)
+               return -EINVAL;
+       err = get_nodes(nodes, nmask, maxnode, mode);
+       if (err)
+diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
+--- a/net/8021q/vlan.c
++++ b/net/8021q/vlan.c
+@@ -578,6 +578,14 @@ static int vlan_device_event(struct noti
+                       if (!vlandev)
+                               continue;
+ 
++                      if (netif_carrier_ok(dev)) {
++                              if (!netif_carrier_ok(vlandev))
++                                      netif_carrier_on(vlandev);
++                      } else {
++                              if (netif_carrier_ok(vlandev))
++                                      netif_carrier_off(vlandev);
++                      }
++
+                       if ((vlandev->state & VLAN_LINK_STATE_MASK) != flgs) {
+                               vlandev->state = (vlandev->state &~ VLAN_LINK_STATE_MASK)
+                                       | flgs;
+diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
+--- a/net/ipv4/icmp.c
++++ b/net/ipv4/icmp.c
+@@ -349,12 +349,12 @@ static void icmp_push_reply(struct icmp_
+ {
+       struct sk_buff *skb;
+ 
+-      ip_append_data(icmp_socket->sk, icmp_glue_bits, icmp_param,
+-                     icmp_param->data_len+icmp_param->head_len,
+-                     icmp_param->head_len,
+-                     ipc, rt, MSG_DONTWAIT);
+-
+-      if ((skb = skb_peek(&icmp_socket->sk->sk_write_queue)) != NULL) {
++      if (ip_append_data(icmp_socket->sk, icmp_glue_bits, icmp_param,
++                         icmp_param->data_len+icmp_param->head_len,
++                         icmp_param->head_len,
++                         ipc, rt, MSG_DONTWAIT) < 0)
++              ip_flush_pending_frames(icmp_socket->sk);
++      else if ((skb = skb_peek(&icmp_socket->sk->sk_write_queue)) != NULL) {
+               struct icmphdr *icmph = skb->h.icmph;
+               unsigned int csum = 0;
+               struct sk_buff *skb1;
+diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
+--- a/net/ipv4/ip_output.c
++++ b/net/ipv4/ip_output.c
+@@ -111,7 +111,6 @@ static int ip_dev_loopback_xmit(struct s
+ #ifdef CONFIG_NETFILTER_DEBUG
+       nf_debug_ip_loopback_xmit(newskb);
+ #endif
+-      nf_reset(newskb);
+       netif_rx(newskb);
+       return 0;
+ }
+@@ -196,8 +195,6 @@ static inline int ip_finish_output2(stru
+       nf_debug_ip_finish_output2(skb);
+ #endif /*CONFIG_NETFILTER_DEBUG*/
+ 
+-      nf_reset(skb);
+-
+       if (hh) {
+               int hh_alen;
+ 
+diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
+--- a/net/ipv4/ip_sockglue.c
++++ b/net/ipv4/ip_sockglue.c
+@@ -848,6 +848,9 @@ mc_msf_out:
+  
+               case IP_IPSEC_POLICY:
+               case IP_XFRM_POLICY:
++                      err = -EPERM;
++                      if (!capable(CAP_NET_ADMIN))
++                              break;
+                       err = xfrm_user_policy(sk, optname, optval, optlen);
+                       break;
+ 
+diff --git a/net/ipv4/netfilter/ip_conntrack_core.c b/net/ipv4/netfilter/ip_conntrack_core.c
+--- a/net/ipv4/netfilter/ip_conntrack_core.c
++++ b/net/ipv4/netfilter/ip_conntrack_core.c
+@@ -1124,6 +1124,9 @@ void ip_conntrack_cleanup(void)
+               schedule();
+               goto i_see_dead_people;
+       }
++      /* wait until all references to ip_conntrack_untracked are dropped */
++      while (atomic_read(&ip_conntrack_untracked.ct_general.use) > 1)
++              schedule();
+ 
+       kmem_cache_destroy(ip_conntrack_cachep);
+       kmem_cache_destroy(ip_conntrack_expect_cachep);
+diff --git a/net/ipv4/netfilter/ip_conntrack_standalone.c b/net/ipv4/netfilter/ip_conntrack_standalone.c
+--- a/net/ipv4/netfilter/ip_conntrack_standalone.c
++++ b/net/ipv4/netfilter/ip_conntrack_standalone.c
+@@ -432,6 +432,13 @@ static unsigned int ip_conntrack_defrag(
+                                       const struct net_device *out,
+                                       int (*okfn)(struct sk_buff *))
+ {
++#if !defined(CONFIG_IP_NF_NAT) && !defined(CONFIG_IP_NF_NAT_MODULE)
++      /* Previously seen (loopback)?  Ignore.  Do this before
++           fragment check. */
++      if ((*pskb)->nfct)
++              return NF_ACCEPT;
++#endif
++
+       /* Gather fragments. */
+       if ((*pskb)->nh.iph->frag_off & htons(IP_MF|IP_OFFSET)) {
+               *pskb = ip_ct_gather_frags(*pskb,
+diff --git a/net/ipv4/netfilter/ip_nat_proto_tcp.c b/net/ipv4/netfilter/ip_nat_proto_tcp.c
+--- a/net/ipv4/netfilter/ip_nat_proto_tcp.c
++++ b/net/ipv4/netfilter/ip_nat_proto_tcp.c
+@@ -40,7 +40,8 @@ tcp_unique_tuple(struct ip_conntrack_tup
+                enum ip_nat_manip_type maniptype,
+                const struct ip_conntrack *conntrack)
+ {
+-      static u_int16_t port, *portptr;
++      static u_int16_t port;
++      u_int16_t *portptr;
+       unsigned int range_size, min, i;
+ 
+       if (maniptype == IP_NAT_MANIP_SRC)
+diff --git a/net/ipv4/netfilter/ip_nat_proto_udp.c b/net/ipv4/netfilter/ip_nat_proto_udp.c
+--- a/net/ipv4/netfilter/ip_nat_proto_udp.c
++++ b/net/ipv4/netfilter/ip_nat_proto_udp.c
+@@ -41,7 +41,8 @@ udp_unique_tuple(struct ip_conntrack_tup
+                enum ip_nat_manip_type maniptype,
+                const struct ip_conntrack *conntrack)
+ {
+-      static u_int16_t port, *portptr;
++      static u_int16_t port;
++      u_int16_t *portptr;
+       unsigned int range_size, min, i;
+ 
+       if (maniptype == IP_NAT_MANIP_SRC)
+diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
+--- a/net/ipv6/ip6_input.c
++++ b/net/ipv6/ip6_input.c
+@@ -198,12 +198,13 @@ resubmit:
+               if (!raw_sk) {
+                       if (xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
+                               IP6_INC_STATS_BH(IPSTATS_MIB_INUNKNOWNPROTOS);
+-                              icmpv6_param_prob(skb, ICMPV6_UNK_NEXTHDR, nhoff);
++                              icmpv6_send(skb, ICMPV6_PARAMPROB,
++                                          ICMPV6_UNK_NEXTHDR, nhoff,
++                                          skb->dev);
+                       }
+-              } else {
++              } else
+                       IP6_INC_STATS_BH(IPSTATS_MIB_INDELIVERS);
+-                      kfree_skb(skb);
+-              }
++              kfree_skb(skb);
+       }
+       rcu_read_unlock();
+       return 0;
+diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
+--- a/net/ipv6/ipv6_sockglue.c
++++ b/net/ipv6/ipv6_sockglue.c
+@@ -503,6 +503,9 @@ done:
+               break;
+       case IPV6_IPSEC_POLICY:
+       case IPV6_XFRM_POLICY:
++              retv = -EPERM;
++              if (!capable(CAP_NET_ADMIN))
++                      break;
+               retv = xfrm_user_policy(sk, optname, optval, optlen);
+               break;
+ 
+diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c
+--- a/net/ipv6/netfilter/ip6_queue.c
++++ b/net/ipv6/netfilter/ip6_queue.c
+@@ -76,7 +76,9 @@ static DECLARE_MUTEX(ipqnl_sem);
+ static void
+ ipq_issue_verdict(struct ipq_queue_entry *entry, int verdict)
+ {
++      local_bh_disable();
+       nf_reinject(entry->skb, entry->info, verdict);
++      local_bh_enable();
+       kfree(entry);
+ }
+ 
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -315,8 +315,8 @@ err:
+ static void netlink_remove(struct sock *sk)
+ {
+       netlink_table_grab();
+-      nl_table[sk->sk_protocol].hash.entries--;
+-      sk_del_node_init(sk);
++      if (sk_del_node_init(sk))
++              nl_table[sk->sk_protocol].hash.entries--;
+       if (nlk_sk(sk)->groups)
+               __sk_del_bind_node(sk);
+       netlink_table_ungrab();
+@@ -429,7 +429,12 @@ retry:
+       err = netlink_insert(sk, pid);
+       if (err == -EADDRINUSE)
+               goto retry;
+-      return 0;
++
++      /* If 2 threads race to autobind, that is fine.  */
++      if (err == -EBUSY)
++              err = 0;
++
++      return err;
+ }
+ 
+ static inline int netlink_capable(struct socket *sock, unsigned int flag) 
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -274,6 +274,9 @@ static int packet_rcv_spkt(struct sk_buf
+       dst_release(skb->dst);
+       skb->dst = NULL;
+ 
++      /* drop conntrack reference */
++      nf_reset(skb);
++
+       spkt = (struct sockaddr_pkt*)skb->cb;
+ 
+       skb_push(skb, skb->data-skb->mac.raw);
+@@ -517,6 +520,9 @@ static int packet_rcv(struct sk_buff *sk
+       dst_release(skb->dst);
+       skb->dst = NULL;
+ 
++      /* drop conntrack reference */
++      nf_reset(skb);
++
+       spin_lock(&sk->sk_receive_queue.lock);
+       po->stats.tp_packets++;
+       __skb_queue_tail(&sk->sk_receive_queue, skb);
+diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
+--- a/net/xfrm/xfrm_user.c
++++ b/net/xfrm/xfrm_user.c
+@@ -1180,6 +1180,9 @@ static struct xfrm_policy *xfrm_compile_
+       if (nr > XFRM_MAX_DEPTH)
+               return NULL;
+ 
++      if (p->dir > XFRM_POLICY_OUT)
++              return NULL;
++
+       xp = xfrm_policy_alloc(GFP_KERNEL);
+       if (xp == NULL) {
+               *dir = -ENOBUFS;
+diff --git a/security/keys/keyring.c b/security/keys/keyring.c
+--- a/security/keys/keyring.c
++++ b/security/keys/keyring.c
+@@ -188,7 +188,11 @@ static void keyring_destroy(struct key *
+ 
+       if (keyring->description) {
+               write_lock(&keyring_name_lock);
+-              list_del(&keyring->type_data.link);
++
++              if (keyring->type_data.link.next != NULL &&
++                  !list_empty(&keyring->type_data.link))
++                      list_del(&keyring->type_data.link);
++
+               write_unlock(&keyring_name_lock);
+       }
+ 
+diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
+--- a/security/keys/process_keys.c
++++ b/security/keys/process_keys.c
+@@ -641,7 +641,7 @@ long join_session_keyring(const char *na
+               keyring = keyring_alloc(name, tsk->uid, tsk->gid, 0, NULL);
+               if (IS_ERR(keyring)) {
+                       ret = PTR_ERR(keyring);
+-                      goto error;
++                      goto error2;
+               }
+       }
+       else if (IS_ERR(keyring)) {
diff -r 19af31a59537 -r f31494465fb0 tools/security/get_decision.c
--- /dev/null   Fri Oct 21 11:06:17 2005
+++ b/tools/security/get_decision.c     Fri Oct 21 11:07:14 2005
@@ -0,0 +1,176 @@
+/****************************************************************
+ * get_decision.c
+ *
+ * Copyright (C) 2005 IBM Corporation
+ *
+ * Authors:
+ * Reiner Sailer <sailer@xxxxxxxxxxxxxx>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ * An example program that shows how to retrieve an access control
+ * decision from the hypervisor ACM based on the currently active policy.
+ *
+ */
+
+#include <unistd.h>
+#include <stdio.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <getopt.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <stdlib.h>
+#include <sys/ioctl.h>
+#include <string.h>
+#include <netinet/in.h>
+#include <xen/acm.h>
+#include <xen/acm_ops.h>
+#include <xen/linux/privcmd.h>
+
+#define PERROR(_m, _a...) \
+fprintf(stderr, "ERROR: " _m " (%d = %s)\n" , ## _a ,  \
+                errno, strerror(errno))
+
+void usage(char *progname)
+{
+    printf("Use: %s \n", progname);
+    printf(" Test program illustrating the retrieval of\n");
+    printf(" access control decisions from xen. At this time,\n");
+    printf(" only sharing (STE) policy decisions are supported.\n");
+    printf(" parameter options:\n");
+    printf("\t -i domid -i domid\n");
+    printf("\t -i domid -s ssidref\n");
+    printf("\t -s ssidref -s ssidref\n\n");
+    exit(-1);
+}
+
+static inline int do_policycmd(int xc_handle, unsigned int cmd,
+                               unsigned long data)
+{
+    return ioctl(xc_handle, cmd, data);
+}
+
+static inline int do_xen_hypercall(int xc_handle,
+                                   privcmd_hypercall_t * hypercall)
+{
+    return do_policycmd(xc_handle,
+                        IOCTL_PRIVCMD_HYPERCALL,
+                        (unsigned long) hypercall);
+}
+
+static inline int do_acm_op(int xc_handle, struct acm_op *op)
+{
+    int ret = -1;
+    privcmd_hypercall_t hypercall;
+
+    op->interface_version = ACM_INTERFACE_VERSION;
+
+    hypercall.op = __HYPERVISOR_acm_op;
+    hypercall.arg[0] = (unsigned long) op;
+
+    if (mlock(op, sizeof(*op)) != 0) {
+        PERROR("Could not lock memory for Xen policy hypercall");
+        goto out1;
+    }
+
+    if ((ret = do_xen_hypercall(xc_handle, &hypercall)) < 0) {
+        if (errno == EACCES)
+            fprintf(stderr, "ACM operation failed -- need to"
+                    " rebuild the user-space tool set?\n");
+        goto out2;
+    }
+
+  out2:(void) munlock(op, sizeof(*op));
+  out1:return ret;
+}
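The mlock()/munlock() bracket around the hypercall is not decoration: privcmd hands the buffer to the hypervisor by virtual address, so the page backing op must stay resident for the duration of the call; pinning it avoids a fault at a point the hypervisor cannot service.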
+
+
+/************************ get decision ******************************/
+
+/* this example uses two domain ids and retrieves the decision if these domains
+ * can share information (useful, e.g., to enforce policy onto network traffic in dom0)
+ */
+int acm_get_decision(int xc_handle, int argc, char *const argv[])
+{
+    struct acm_op op;
+    int ret;
+
+    op.cmd = ACM_GETDECISION;
+    op.interface_version = ACM_INTERFACE_VERSION;
+    op.u.getdecision.get_decision_by1 = UNSET;
+    op.u.getdecision.get_decision_by2 = UNSET;
+    op.u.getdecision.hook = SHARING;
+
+    while (1) {
+        int c = getopt(argc, argv, "i:s:");
+        if (c == -1)
+            break;
+
+        if (c == 'i') {
+            if (op.u.getdecision.get_decision_by1 == UNSET) {
+                op.u.getdecision.get_decision_by1 = DOMAINID;
+                op.u.getdecision.id1.domainid = strtoul(optarg, NULL, 0);
+            } else if (op.u.getdecision.get_decision_by2 == UNSET) {
+                op.u.getdecision.get_decision_by2 = DOMAINID;
+                op.u.getdecision.id2.domainid = strtoul(optarg, NULL, 0);
+            } else
+                usage(argv[0]);
+        } else if (c == 's') {
+            if (op.u.getdecision.get_decision_by1 == UNSET) {
+                op.u.getdecision.get_decision_by1 = SSIDREF;
+                op.u.getdecision.id1.ssidref = strtoul(optarg, NULL, 0);
+            } else if (op.u.getdecision.get_decision_by2 == UNSET) {
+                op.u.getdecision.get_decision_by2 = SSIDREF;
+                op.u.getdecision.id2.ssidref = strtoul(optarg, NULL, 0);
+            } else
+                usage(argv[0]);
+        } else
+            usage(argv[0]);
+    }
+    if ((op.u.getdecision.get_decision_by1 == UNSET) ||
+        (op.u.getdecision.get_decision_by2 == UNSET))
+        usage(argv[0]);
+
+    if ((ret = do_acm_op(xc_handle, &op))) {
+        printf("%s: Error getting decision (%d).\n", __func__, ret);
+        printf("%s: decision = %s.\n", __func__,
+               (op.u.getdecision.acm_decision ==
+                ACM_ACCESS_PERMITTED) ? "PERMITTED" : ((op.u.getdecision.
+                                                        acm_decision ==
+                                                        ACM_ACCESS_DENIED)
+                                                       ? "DENIED" :
+                                                       "ERROR"));
+        return ret;
+    }
+    return op.u.getdecision.acm_decision;
+}
+
+/***************************** main **************************************/
+
+int main(int argc, char **argv)
+{
+
+    int acm_cmd_fd, ret = 0;
+
+    if (argc < 5)
+        usage(argv[0]);
+
+    if ((acm_cmd_fd = open("/proc/xen/privcmd", O_RDONLY)) <= 0) {
+        printf("ERROR: Could not open xen privcmd device!\n");
+        exit(-1);
+    }
+
+    ret = acm_get_decision(acm_cmd_fd, argc, argv);
+
+    printf("Decision: %s (%d)\n",
+           (ret == ACM_ACCESS_PERMITTED) ? "PERMITTED" :
+           ((ret == ACM_ACCESS_DENIED) ? "DENIED" : "ERROR"), ret);
+
+    close(acm_cmd_fd);
+    return ret;
+}
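Matching the usage() text above, the tool takes exactly two subjects in any mix of the two forms: for example (the IDs here are illustrative), get_decision -i 1 -i 2 asks whether domains 1 and 2 may share, and get_decision -i 1 -s 0x1002 tests a domain against an explicit ssidref. It prints PERMITTED, DENIED, or ERROR and returns the raw ACM decision value as its exit status.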
diff -r 19af31a59537 -r f31494465fb0 patches/linux-2.6.12/2.6.12.5.patch
--- a/patches/linux-2.6.12/2.6.12.5.patch       Fri Oct 21 11:06:17 2005
+++ /dev/null   Fri Oct 21 11:07:14 2005
@@ -1,1614 +0,0 @@
-diff --git a/Makefile b/Makefile
---- a/Makefile
-+++ b/Makefile
-@@ -1,7 +1,7 @@
- VERSION = 2
- PATCHLEVEL = 6
- SUBLEVEL = 12
--EXTRAVERSION =
-+EXTRAVERSION = .5
- NAME=Woozy Numbat
- 
- # *DOCUMENTATION*
-@@ -1149,7 +1149,7 @@ endif # KBUILD_EXTMOD
- #(which is the most common case IMHO) to avoid unneeded clutter in the big tags file.
- #Adding $(srctree) adds about 20M on i386 to the size of the output file!
- 
--ifeq ($(KBUILD_OUTPUT),)
-+ifeq ($(src),$(obj))
- __srctree =
- else
- __srctree = $(srctree)/
-diff --git a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
---- a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
-+++ b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
-@@ -44,7 +44,7 @@
- 
- #define PFX "powernow-k8: "
- #define BFX PFX "BIOS error: "
--#define VERSION "version 1.40.2"
-+#define VERSION "version 1.40.4"
- #include "powernow-k8.h"
- 
- /* serialize freq changes  */
-@@ -978,7 +978,7 @@ static int __init powernowk8_cpu_init(st
- {
-       struct powernow_k8_data *data;
-       cpumask_t oldmask = CPU_MASK_ALL;
--      int rc;
-+      int rc, i;
- 
-       if (!check_supported_cpu(pol->cpu))
-               return -ENODEV;
-@@ -1064,7 +1064,9 @@ static int __init powernowk8_cpu_init(st
-       printk("cpu_init done, current fid 0x%x, vid 0x%x\n",
-              data->currfid, data->currvid);
- 
--      powernow_data[pol->cpu] = data;
-+      for_each_cpu_mask(i, cpu_core_map[pol->cpu]) {
-+              powernow_data[i] = data;
-+      }
- 
-       return 0;
- 
-diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
---- a/arch/i386/kernel/process.c
-+++ b/arch/i386/kernel/process.c
-@@ -827,6 +827,8 @@ asmlinkage int sys_get_thread_area(struc
-       if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
-               return -EINVAL;
- 
-+      memset(&info, 0, sizeof(info));
-+
-       desc = current->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
- 
-       info.entry_number = idx;
-diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c
---- a/arch/ia64/kernel/ptrace.c
-+++ b/arch/ia64/kernel/ptrace.c
-@@ -945,6 +945,13 @@ access_uarea (struct task_struct *child,
-                               *data = (pt->cr_ipsr & IPSR_MASK);
-                       return 0;
- 
-+                    case PT_AR_RSC:
-+                      if (write_access)
-+                              pt->ar_rsc = *data | (3 << 2); /* force PL3 */
-+                      else
-+                              *data = pt->ar_rsc;
-+                      return 0;
-+
-                     case PT_AR_RNAT:
-                       urbs_end = ia64_get_user_rbs_end(child, pt, NULL);
-                       rnat_addr = (long) ia64_rse_rnat_addr((long *)
-@@ -996,9 +1003,6 @@ access_uarea (struct task_struct *child,
-                     case PT_AR_BSPSTORE:
-                       ptr = pt_reg_addr(pt, ar_bspstore);
-                       break;
--                    case PT_AR_RSC:
--                      ptr = pt_reg_addr(pt, ar_rsc);
--                      break;
-                     case PT_AR_UNAT:
-                       ptr = pt_reg_addr(pt, ar_unat);
-                       break;
-@@ -1234,7 +1238,7 @@ ptrace_getregs (struct task_struct *chil
- static long
- ptrace_setregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
- {
--      unsigned long psr, ec, lc, rnat, bsp, cfm, nat_bits, val = 0;
-+      unsigned long psr, rsc, ec, lc, rnat, bsp, cfm, nat_bits, val = 0;
-       struct unw_frame_info info;
-       struct switch_stack *sw;
-       struct ia64_fpreg fpval;
-@@ -1267,7 +1271,7 @@ ptrace_setregs (struct task_struct *chil
-       /* app regs */
- 
-       retval |= __get_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]);
--      retval |= __get_user(pt->ar_rsc, &ppr->ar[PT_AUR_RSC]);
-+      retval |= __get_user(rsc, &ppr->ar[PT_AUR_RSC]);
-       retval |= __get_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]);
-       retval |= __get_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]);
-       retval |= __get_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]);
-@@ -1365,6 +1369,7 @@ ptrace_setregs (struct task_struct *chil
-       retval |= __get_user(nat_bits, &ppr->nat);
- 
-       retval |= access_uarea(child, PT_CR_IPSR, &psr, 1);
-+      retval |= access_uarea(child, PT_AR_RSC, &rsc, 1);
-       retval |= access_uarea(child, PT_AR_EC, &ec, 1);
-       retval |= access_uarea(child, PT_AR_LC, &lc, 1);
-       retval |= access_uarea(child, PT_AR_RNAT, &rnat, 1);
-diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c
---- a/arch/ia64/kernel/signal.c
-+++ b/arch/ia64/kernel/signal.c
-@@ -94,7 +94,7 @@ sys_sigaltstack (const stack_t __user *u
- static long
- restore_sigcontext (struct sigcontext __user *sc, struct sigscratch *scr)
- {
--      unsigned long ip, flags, nat, um, cfm;
-+      unsigned long ip, flags, nat, um, cfm, rsc;
-       long err;
- 
-       /* Always make any pending restarted system calls return -EINTR */
-@@ -106,7 +106,7 @@ restore_sigcontext (struct sigcontext __
-       err |= __get_user(ip, &sc->sc_ip);                      /* instruction pointer */
-       err |= __get_user(cfm, &sc->sc_cfm);
-       err |= __get_user(um, &sc->sc_um);                      /* user mask */
--      err |= __get_user(scr->pt.ar_rsc, &sc->sc_ar_rsc);
-+      err |= __get_user(rsc, &sc->sc_ar_rsc);
-       err |= __get_user(scr->pt.ar_unat, &sc->sc_ar_unat);
-       err |= __get_user(scr->pt.ar_fpsr, &sc->sc_ar_fpsr);
-       err |= __get_user(scr->pt.ar_pfs, &sc->sc_ar_pfs);
-@@ -119,6 +119,7 @@ restore_sigcontext (struct sigcontext __
-       err |= __copy_from_user(&scr->pt.r15, &sc->sc_gr[15], 8);       /* r15 */
- 
-       scr->pt.cr_ifs = cfm | (1UL << 63);
-+      scr->pt.ar_rsc = rsc | (3 << 2); /* force PL3 */
- 
-       /* establish new instruction pointer: */
-       scr->pt.cr_iip = ip & ~0x3UL;
-diff --git a/arch/ppc/kernel/time.c b/arch/ppc/kernel/time.c
---- a/arch/ppc/kernel/time.c
-+++ b/arch/ppc/kernel/time.c
-@@ -89,6 +89,9 @@ unsigned long tb_to_ns_scale;
- 
- extern unsigned long wall_jiffies;
- 
-+/* used for timezone offset */
-+static long timezone_offset;
-+
- DEFINE_SPINLOCK(rtc_lock);
- 
- EXPORT_SYMBOL(rtc_lock);
-@@ -170,7 +173,7 @@ void timer_interrupt(struct pt_regs * re
-                    xtime.tv_sec - last_rtc_update >= 659 &&
-                    abs((xtime.tv_nsec / 1000) - (1000000-1000000/HZ)) < 500000/HZ &&
-                    jiffies - wall_jiffies == 1) {
--                      if (ppc_md.set_rtc_time(xtime.tv_sec+1 + time_offset) == 0)
-+                      if (ppc_md.set_rtc_time(xtime.tv_sec+1 + timezone_offset) == 0)
-                               last_rtc_update = xtime.tv_sec+1;
-                       else
-                               /* Try again one minute later */
-@@ -286,7 +289,7 @@ void __init time_init(void)
-       unsigned old_stamp, stamp, elapsed;
- 
-         if (ppc_md.time_init != NULL)
--                time_offset = ppc_md.time_init();
-+                timezone_offset = ppc_md.time_init();
- 
-       if (__USE_RTC()) {
-               /* 601 processor: dec counts down by 128 every 128ns */
-@@ -331,10 +334,10 @@ void __init time_init(void)
-       set_dec(tb_ticks_per_jiffy);
- 
-       /* If platform provided a timezone (pmac), we correct the time */
--        if (time_offset) {
--              sys_tz.tz_minuteswest = -time_offset / 60;
-+        if (timezone_offset) {
-+              sys_tz.tz_minuteswest = -timezone_offset / 60;
-               sys_tz.tz_dsttime = 0;
--              xtime.tv_sec -= time_offset;
-+              xtime.tv_sec -= timezone_offset;
-         }
-         set_normalized_timespec(&wall_to_monotonic,
-                                 -xtime.tv_sec, -xtime.tv_nsec);
-diff --git a/arch/ppc64/boot/zlib.c b/arch/ppc64/boot/zlib.c
---- a/arch/ppc64/boot/zlib.c
-+++ b/arch/ppc64/boot/zlib.c
-@@ -1307,7 +1307,7 @@ local int huft_build(
-   {
-     *t = (inflate_huft *)Z_NULL;
-     *m = 0;
--    return Z_OK;
-+    return Z_DATA_ERROR;
-   }
- 
- 
-@@ -1351,6 +1351,7 @@ local int huft_build(
-     if ((j = *p++) != 0)
-       v[x[j]++] = i;
-   } while (++i < n);
-+  n = x[g];                   /* set n to length of v */
- 
- 
-   /* Generate the Huffman codes and for each, make the table entries */
-diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
---- a/arch/um/kernel/process.c
-+++ b/arch/um/kernel/process.c
-@@ -130,7 +130,7 @@ int start_fork_tramp(void *thread_arg, u
-       return(arg.pid);
- }
- 
--static int ptrace_child(void)
-+static int ptrace_child(void *arg)
- {
-       int ret;
-       int pid = os_getpid(), ppid = getppid();
-@@ -159,16 +159,20 @@ static int ptrace_child(void)
-       _exit(ret);
- }
- 
--static int start_ptraced_child(void)
-+static int start_ptraced_child(void **stack_out)
- {
-+      void *stack;
-+      unsigned long sp;
-       int pid, n, status;
-       
--      pid = fork();
--      if(pid == 0)
--              ptrace_child();
--
-+      stack = mmap(NULL, PAGE_SIZE, PROT_READ | PROT_WRITE | PROT_EXEC,
-+                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
-+      if(stack == MAP_FAILED)
-+              panic("check_ptrace : mmap failed, errno = %d", errno);
-+      sp = (unsigned long) stack + PAGE_SIZE - sizeof(void *);
-+      pid = clone(ptrace_child, (void *) sp, SIGCHLD, NULL);
-       if(pid < 0)
--              panic("check_ptrace : fork failed, errno = %d", errno);
-+              panic("check_ptrace : clone failed, errno = %d", errno);
-       CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED));
-       if(n < 0)
-               panic("check_ptrace : wait failed, errno = %d", errno);
-@@ -176,6 +180,7 @@ static int start_ptraced_child(void)
-               panic("check_ptrace : expected SIGSTOP, got status = %d",
-                     status);
- 
-+      *stack_out = stack;
-       return(pid);
- }
- 
-@@ -183,12 +188,12 @@ static int start_ptraced_child(void)
-  * just avoid using sysemu, not panic, but only if SYSEMU features are broken.
-  * So only for SYSEMU features we test mustpanic, while normal host features
-  * must work anyway!*/
--static int stop_ptraced_child(int pid, int exitcode, int mustexit)
-+static int stop_ptraced_child(int pid, void *stack, int exitcode, int mustpanic)
- {
-       int status, n, ret = 0;
- 
-       if(ptrace(PTRACE_CONT, pid, 0, 0) < 0)
--              panic("stop_ptraced_child : ptrace failed, errno = %d", errno);
-+              panic("check_ptrace : ptrace failed, errno = %d", errno);
-       CATCH_EINTR(n = waitpid(pid, &status, 0));
-       if(!WIFEXITED(status) || (WEXITSTATUS(status) != exitcode)) {
-               int exit_with = WEXITSTATUS(status);
-@@ -199,13 +204,15 @@ static int stop_ptraced_child(int pid, i
-               printk("check_ptrace : child exited with exitcode %d, while "
-                     "expecting %d; status 0x%x", exit_with,
-                     exitcode, status);
--              if (mustexit)
-+              if (mustpanic)
-                       panic("\n");
-               else
-                       printk("\n");
-               ret = -1;
-       }
- 
-+      if(munmap(stack, PAGE_SIZE) < 0)
-+              panic("check_ptrace : munmap failed, errno = %d", errno);
-       return ret;
- }
- 
-@@ -227,11 +234,12 @@ __uml_setup("nosysemu", nosysemu_cmd_par
- 
- static void __init check_sysemu(void)
- {
-+      void *stack;
-       int pid, syscall, n, status, count=0;
- 
-       printk("Checking syscall emulation patch for ptrace...");
-       sysemu_supported = 0;
--      pid = start_ptraced_child();
-+      pid = start_ptraced_child(&stack);
- 
-       if(ptrace(PTRACE_SYSEMU, pid, 0, 0) < 0)
-               goto fail;
-@@ -249,7 +257,7 @@ static void __init check_sysemu(void)
-               panic("check_sysemu : failed to modify system "
-                     "call return, errno = %d", errno);
- 
--      if (stop_ptraced_child(pid, 0, 0) < 0)
-+      if (stop_ptraced_child(pid, stack, 0, 0) < 0)
-               goto fail_stopped;
- 
-       sysemu_supported = 1;
-@@ -257,7 +265,7 @@ static void __init check_sysemu(void)
-       set_using_sysemu(!force_sysemu_disabled);
- 
-       printk("Checking advanced syscall emulation patch for ptrace...");
--      pid = start_ptraced_child();
-+      pid = start_ptraced_child(&stack);
-       while(1){
-               count++;
-               if(ptrace(PTRACE_SYSEMU_SINGLESTEP, pid, 0, 0) < 0)
-@@ -282,7 +290,7 @@ static void __init check_sysemu(void)
-                       break;
-               }
-       }
--      if (stop_ptraced_child(pid, 0, 0) < 0)
-+      if (stop_ptraced_child(pid, stack, 0, 0) < 0)
-               goto fail_stopped;
- 
-       sysemu_supported = 2;
-@@ -293,17 +301,18 @@ static void __init check_sysemu(void)
-       return;
- 
- fail:
--      stop_ptraced_child(pid, 1, 0);
-+      stop_ptraced_child(pid, stack, 1, 0);
- fail_stopped:
-       printk("missing\n");
- }
- 
- void __init check_ptrace(void)
- {
-+      void *stack;
-       int pid, syscall, n, status;
- 
-       printk("Checking that ptrace can change system call numbers...");
--      pid = start_ptraced_child();
-+      pid = start_ptraced_child(&stack);
- 
-       if (ptrace(PTRACE_OLDSETOPTIONS, pid, 0, (void *)PTRACE_O_TRACESYSGOOD) < 0)
-               panic("check_ptrace: PTRACE_SETOPTIONS failed, errno = %d", errno);
-@@ -330,7 +339,7 @@ void __init check_ptrace(void)
-                       break;
-               }
-       }
--      stop_ptraced_child(pid, 0, 1);
-+      stop_ptraced_child(pid, stack, 0, 1);
-       printk("OK\n");
-       check_sysemu();
- }
-@@ -362,10 +371,11 @@ void forward_pending_sigio(int target)
- static inline int check_skas3_ptrace_support(void)
- {
-       struct ptrace_faultinfo fi;
-+      void *stack;
-       int pid, n, ret = 1;
- 
-       printf("Checking for the skas3 patch in the host...");
--      pid = start_ptraced_child();
-+      pid = start_ptraced_child(&stack);
- 
-       n = ptrace(PTRACE_FAULTINFO, pid, 0, &fi);
-       if (n < 0) {
-@@ -380,7 +390,7 @@ static inline int check_skas3_ptrace_sup
-       }
- 
-       init_registers(pid);
--      stop_ptraced_child(pid, 1, 1);
-+      stop_ptraced_child(pid, stack, 1, 1);
- 
-       return(ret);
- }
-diff --git a/arch/x86_64/ia32/syscall32.c b/arch/x86_64/ia32/syscall32.c
---- a/arch/x86_64/ia32/syscall32.c
-+++ b/arch/x86_64/ia32/syscall32.c
-@@ -57,6 +57,7 @@ int syscall32_setup_pages(struct linux_b
-       int npages = (VSYSCALL32_END - VSYSCALL32_BASE) >> PAGE_SHIFT;
-       struct vm_area_struct *vma;
-       struct mm_struct *mm = current->mm;
-+      int ret;
- 
-       vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
-       if (!vma)
-@@ -78,7 +79,11 @@ int syscall32_setup_pages(struct linux_b
-       vma->vm_mm = mm;
- 
-       down_write(&mm->mmap_sem);
--      insert_vm_struct(mm, vma);
-+      if ((ret = insert_vm_struct(mm, vma))) {
-+              up_write(&mm->mmap_sem);
-+              kmem_cache_free(vm_area_cachep, vma);
-+              return ret;
-+      }
-       mm->total_vm += npages;
-       up_write(&mm->mmap_sem);
-       return 0;
-diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c
---- a/arch/x86_64/kernel/setup.c
-+++ b/arch/x86_64/kernel/setup.c
-@@ -729,8 +729,6 @@ static void __init amd_detect_cmp(struct
-       int cpu = smp_processor_id();
-       int node = 0;
-       unsigned bits;
--      if (c->x86_num_cores == 1)
--              return;
- 
-       bits = 0;
-       while ((1 << bits) < c->x86_num_cores)
-diff --git a/arch/x86_64/kernel/smp.c b/arch/x86_64/kernel/smp.c
---- a/arch/x86_64/kernel/smp.c
-+++ b/arch/x86_64/kernel/smp.c
-@@ -284,6 +284,71 @@ struct call_data_struct {
- static struct call_data_struct * call_data;
- 
- /*
-+ * this function sends a 'generic call function' IPI to one other CPU
-+ * in the system.
-+ */
-+static void __smp_call_function_single (int cpu, void (*func) (void *info), void *info,
-+                              int nonatomic, int wait)
-+{
-+      struct call_data_struct data;
-+      int cpus = 1;
-+
-+      data.func = func;
-+      data.info = info;
-+      atomic_set(&data.started, 0);
-+      data.wait = wait;
-+      if (wait)
-+              atomic_set(&data.finished, 0);
-+
-+      call_data = &data;
-+      wmb();
-+      /* Send a message to all other CPUs and wait for them to respond */
-+      send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNCTION_VECTOR);
-+
-+      /* Wait for response */
-+      while (atomic_read(&data.started) != cpus)
-+              cpu_relax();
-+
-+      if (!wait)
-+              return;
-+
-+      while (atomic_read(&data.finished) != cpus)
-+              cpu_relax();
-+}
-+
-+/*
-+ * Run a function on another CPU
-+ *  <func>    The function to run. This must be fast and non-blocking.
-+ *  <info>    An arbitrary pointer to pass to the function.
-+ *  <nonatomic>       Currently unused.
-+ *  <wait>    If true, wait until function has completed on other CPUs.
-+ *  [RETURNS]   0 on success, else a negative status code.
-+ *
-+ * Does not return until the remote CPU is nearly ready to execute <func>
-+ * or is or has executed.
-+ */
-+
-+int smp_call_function_single (int cpu, void (*func) (void *info), void *info, 
-+      int nonatomic, int wait)
-+{
-+      
-+      int me = get_cpu(); /* prevent preemption and reschedule on another processor */
-+
-+      if (cpu == me) {
-+              printk("%s: trying to call self\n", __func__);
-+              put_cpu();
-+              return -EBUSY;
-+      }
-+      spin_lock_bh(&call_lock);
-+
-+      __smp_call_function_single(cpu, func,info,nonatomic,wait);      
-+
-+      spin_unlock_bh(&call_lock);
-+      put_cpu();
-+      return 0;
-+}
-+
-+/*
-  * this function sends a 'generic call function' IPI to all other CPUs
-  * in the system.
-  */
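
The smp_call_function_single() introduced above runs a handler on exactly
one remote CPU rather than broadcasting an IPI. A minimal sketch of a
caller, assuming only the signature added in this hunk (the function names
here are hypothetical); the handler runs in IPI context, so it must be fast
and non-blocking:

#include <linux/smp.h>

/* Hypothetical handler: record which CPU actually ran it. */
static void remote_probe(void *info)
{
	*(unsigned long *)info = smp_processor_id();
}

static unsigned long probe_cpu(int cpu)
{
	unsigned long val = 0;

	/* nonatomic is unused; wait = 1 blocks until the handler is done.
	 * Returns -EBUSY if cpu is the calling CPU, 0 on success. */
	if (smp_call_function_single(cpu, remote_probe, &val, 1, 1) != 0)
		return 0;
	return val;
}
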
-diff --git a/arch/x86_64/kernel/smpboot.c b/arch/x86_64/kernel/smpboot.c
---- a/arch/x86_64/kernel/smpboot.c
-+++ b/arch/x86_64/kernel/smpboot.c
-@@ -202,9 +202,6 @@ static __cpuinit void sync_master(void *
- {
-       unsigned long flags, i;
- 
--      if (smp_processor_id() != boot_cpu_id)
--              return;
--
-       go[MASTER] = 0;
- 
-       local_irq_save(flags);
-@@ -253,7 +250,7 @@ get_delta(long *rt, long *master)
-       return tcenter - best_tm;
- }
- 
--static __cpuinit void sync_tsc(void)
-+static __cpuinit void sync_tsc(unsigned int master)
- {
-       int i, done = 0;
-       long delta, adj, adjust_latency = 0;
-@@ -267,9 +264,17 @@ static __cpuinit void sync_tsc(void)
-       } t[NUM_ROUNDS] __cpuinitdata;
- #endif
- 
-+      printk(KERN_INFO "CPU %d: Syncing TSC to CPU %u.\n",
-+              smp_processor_id(), master);
-+
-       go[MASTER] = 1;
- 
--      smp_call_function(sync_master, NULL, 1, 0);
-+      /* It is dangerous to broadcast IPI as cpus are coming up,
-+       * as they may not be ready to accept them.  So since
-+       * we only need to send the ipi to the boot cpu direct
-+       * the message, and avoid the race.
-+       */
-+      smp_call_function_single(master, sync_master, NULL, 1, 0);
- 
-       while (go[MASTER])      /* wait for master to be ready */
-               no_cpu_relax();
-@@ -313,16 +318,14 @@ static __cpuinit void sync_tsc(void)
-       printk(KERN_INFO
-              "CPU %d: synchronized TSC with CPU %u (last diff %ld cycles, "
-              "maxerr %lu cycles)\n",
--             smp_processor_id(), boot_cpu_id, delta, rt);
-+             smp_processor_id(), master, delta, rt);
- }
- 
- static void __cpuinit tsc_sync_wait(void)
- {
-       if (notscsync || !cpu_has_tsc)
-               return;
--      printk(KERN_INFO "CPU %d: Syncing TSC to CPU %u.\n", smp_processor_id(),
--                      boot_cpu_id);
--      sync_tsc();
-+      sync_tsc(0);
- }
- 
- static __init int notscsync_setup(char *s)
-diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
---- a/drivers/acpi/pci_irq.c
-+++ b/drivers/acpi/pci_irq.c
-@@ -433,8 +433,9 @@ acpi_pci_irq_enable (
-               printk(KERN_WARNING PREFIX "PCI Interrupt %s[%c]: no GSI",
-                       pci_name(dev), ('A' + pin));
-               /* Interrupt Line values above 0xF are forbidden */
--              if (dev->irq >= 0 && (dev->irq <= 0xF)) {
-+              if (dev->irq > 0 && (dev->irq <= 0xF)) {
-                       printk(" - using IRQ %d\n", dev->irq);
-+                      acpi_register_gsi(dev->irq, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW);
-                       return_VALUE(0);
-               }
-               else {
-diff --git a/drivers/char/rocket.c b/drivers/char/rocket.c
---- a/drivers/char/rocket.c
-+++ b/drivers/char/rocket.c
-@@ -277,7 +277,7 @@ static void rp_do_receive(struct r_port 
-               ToRecv = space;
- 
-       if (ToRecv <= 0)
--              return;
-+              goto done;
- 
-       /*
-        * if status indicates there are errored characters in the
-@@ -359,6 +359,7 @@ static void rp_do_receive(struct r_port 
-       }
-       /*  Push the data up to the tty layer */
-       ld->receive_buf(tty, tty->flip.char_buf, tty->flip.flag_buf, count);
-+done:
-       tty_ldisc_deref(ld);
- }
- 
-diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
---- a/drivers/char/tpm/tpm.c
-+++ b/drivers/char/tpm/tpm.c
-@@ -32,12 +32,6 @@
- 
- #define       TPM_BUFSIZE                     2048
- 
--/* PCI configuration addresses */
--#define       PCI_GEN_PMCON_1                 0xA0
--#define       PCI_GEN1_DEC                    0xE4
--#define       PCI_LPC_EN                      0xE6
--#define       PCI_GEN2_DEC                    0xEC
--
- static LIST_HEAD(tpm_chip_list);
- static DEFINE_SPINLOCK(driver_lock);
- static int dev_mask[32];
-@@ -61,72 +55,6 @@ void tpm_time_expired(unsigned long ptr)
- EXPORT_SYMBOL_GPL(tpm_time_expired);
- 
- /*
-- * Initialize the LPC bus and enable the TPM ports
-- */
--int tpm_lpc_bus_init(struct pci_dev *pci_dev, u16 base)
--{
--      u32 lpcenable, tmp;
--      int is_lpcm = 0;
--
--      switch (pci_dev->vendor) {
--      case PCI_VENDOR_ID_INTEL:
--              switch (pci_dev->device) {
--              case PCI_DEVICE_ID_INTEL_82801CA_12:
--              case PCI_DEVICE_ID_INTEL_82801DB_12:
--                      is_lpcm = 1;
--                      break;
--              }
--              /* init ICH (enable LPC) */
--              pci_read_config_dword(pci_dev, PCI_GEN1_DEC, &lpcenable);
--              lpcenable |= 0x20000000;
--              pci_write_config_dword(pci_dev, PCI_GEN1_DEC, lpcenable);
--
--              if (is_lpcm) {
--                      pci_read_config_dword(pci_dev, PCI_GEN1_DEC,
--                                            &lpcenable);
--                      if ((lpcenable & 0x20000000) == 0) {
--                              dev_err(&pci_dev->dev,
--                                      "cannot enable LPC\n");
--                              return -ENODEV;
--                      }
--              }
--
--              /* initialize TPM registers */
--              pci_read_config_dword(pci_dev, PCI_GEN2_DEC, &tmp);
--
--              if (!is_lpcm)
--                      tmp = (tmp & 0xFFFF0000) | (base & 0xFFF0);
--              else
--                      tmp =
--                          (tmp & 0xFFFF0000) | (base & 0xFFF0) |
--                          0x00000001;
--
--              pci_write_config_dword(pci_dev, PCI_GEN2_DEC, tmp);
--
--              if (is_lpcm) {
--                      pci_read_config_dword(pci_dev, PCI_GEN_PMCON_1,
--                                            &tmp);
--                      tmp |= 0x00000004;      /* enable CLKRUN */
--                      pci_write_config_dword(pci_dev, PCI_GEN_PMCON_1,
--                                             tmp);
--              }
--              tpm_write_index(0x0D, 0x55);    /* unlock 4F */
--              tpm_write_index(0x0A, 0x00);    /* int disable */
--              tpm_write_index(0x08, base);    /* base addr lo */
--      tpm_write_index(0x09, (base & 0xFF00) >> 8);    /* base addr hi */
--              tpm_write_index(0x0D, 0xAA);    /* lock 4F */
--              break;
--      case PCI_VENDOR_ID_AMD:
--              /* nothing yet */
--              break;
--      }
--
--      return 0;
--}
--
--EXPORT_SYMBOL_GPL(tpm_lpc_bus_init);
--
--/*
-  * Internal kernel interface to transmit TPM commands
-  */
- static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
-@@ -590,10 +518,6 @@ int tpm_pm_resume(struct pci_dev *pci_de
-       if (chip == NULL)
-               return -ENODEV;
- 
--      spin_lock(&driver_lock);
--      tpm_lpc_bus_init(pci_dev, chip->vendor->base);
--      spin_unlock(&driver_lock);
--
-       return 0;
- }
- 
-diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
---- a/drivers/char/tpm/tpm.h
-+++ b/drivers/char/tpm/tpm.h
-@@ -79,8 +79,6 @@ static inline void tpm_write_index(int i
- }
- 
- extern void tpm_time_expired(unsigned long);
--extern int tpm_lpc_bus_init(struct pci_dev *, u16);
--
- extern int tpm_register_hardware(struct pci_dev *,
-                                struct tpm_vendor_specific *);
- extern int tpm_open(struct inode *, struct file *);
-diff --git a/drivers/char/tpm/tpm_atmel.c b/drivers/char/tpm/tpm_atmel.c
---- a/drivers/char/tpm/tpm_atmel.c
-+++ b/drivers/char/tpm/tpm_atmel.c
-@@ -22,7 +22,10 @@
- #include "tpm.h"
- 
- /* Atmel definitions */
--#define       TPM_ATML_BASE                   0x400
-+enum tpm_atmel_addr {
-+      TPM_ATMEL_BASE_ADDR_LO = 0x08,
-+      TPM_ATMEL_BASE_ADDR_HI = 0x09
-+};
- 
- /* write status bits */
- #define       ATML_STATUS_ABORT               0x01
-@@ -127,7 +130,6 @@ static struct tpm_vendor_specific tpm_at
-       .cancel = tpm_atml_cancel,
-       .req_complete_mask = ATML_STATUS_BUSY | ATML_STATUS_DATA_AVAIL,
-       .req_complete_val = ATML_STATUS_DATA_AVAIL,
--      .base = TPM_ATML_BASE,
-       .miscdev = { .fops = &atmel_ops, },
- };
- 
-@@ -136,14 +138,16 @@ static int __devinit tpm_atml_init(struc
- {
-       u8 version[4];
-       int rc = 0;
-+      int lo, hi;
- 
-       if (pci_enable_device(pci_dev))
-               return -EIO;
- 
--      if (tpm_lpc_bus_init(pci_dev, TPM_ATML_BASE)) {
--              rc = -ENODEV;
--              goto out_err;
--      }
-+      lo = tpm_read_index( TPM_ATMEL_BASE_ADDR_LO );
-+      hi = tpm_read_index( TPM_ATMEL_BASE_ADDR_HI );
-+
-+      tpm_atmel.base = (hi<<8)|lo;
-+      dev_dbg( &pci_dev->dev, "Operating with base: 0x%x\n", tpm_atmel.base);
- 
-       /* verify that it is an Atmel part */
-       if (tpm_read_index(4) != 'A' || tpm_read_index(5) != 'T'
-diff --git a/drivers/char/tpm/tpm_nsc.c b/drivers/char/tpm/tpm_nsc.c
---- a/drivers/char/tpm/tpm_nsc.c
-+++ b/drivers/char/tpm/tpm_nsc.c
-@@ -24,6 +24,10 @@
- /* National definitions */
- #define       TPM_NSC_BASE                    0x360
- #define       TPM_NSC_IRQ                     0x07
-+#define       TPM_NSC_BASE0_HI                0x60
-+#define       TPM_NSC_BASE0_LO                0x61
-+#define       TPM_NSC_BASE1_HI                0x62
-+#define       TPM_NSC_BASE1_LO                0x63
- 
- #define       NSC_LDN_INDEX                   0x07
- #define       NSC_SID_INDEX                   0x20
-@@ -234,7 +238,6 @@ static struct tpm_vendor_specific tpm_ns
-       .cancel = tpm_nsc_cancel,
-       .req_complete_mask = NSC_STATUS_OBF,
-       .req_complete_val = NSC_STATUS_OBF,
--      .base = TPM_NSC_BASE,
-       .miscdev = { .fops = &nsc_ops, },
-       
- };
-@@ -243,15 +246,16 @@ static int __devinit tpm_nsc_init(struct
-                                 const struct pci_device_id *pci_id)
- {
-       int rc = 0;
-+      int lo, hi;
-+
-+      hi = tpm_read_index(TPM_NSC_BASE0_HI);
-+      lo = tpm_read_index(TPM_NSC_BASE0_LO);
-+
-+      tpm_nsc.base = (hi<<8) | lo;
- 
-       if (pci_enable_device(pci_dev))
-               return -EIO;
- 
--      if (tpm_lpc_bus_init(pci_dev, TPM_NSC_BASE)) {
--              rc = -ENODEV;
--              goto out_err;
--      }
--
-       /* verify that it is a National part (SID) */
-       if (tpm_read_index(NSC_SID_INDEX) != 0xEF) {
-               rc = -ENODEV;
-diff --git a/drivers/char/tty_ioctl.c b/drivers/char/tty_ioctl.c
---- a/drivers/char/tty_ioctl.c
-+++ b/drivers/char/tty_ioctl.c
-@@ -476,11 +476,11 @@ int n_tty_ioctl(struct tty_struct * tty,
-                       ld = tty_ldisc_ref(tty);
-                       switch (arg) {
-                       case TCIFLUSH:
--                              if (ld->flush_buffer)
-+                              if (ld && ld->flush_buffer)
-                                       ld->flush_buffer(tty);
-                               break;
-                       case TCIOFLUSH:
--                              if (ld->flush_buffer)
-+                              if (ld && ld->flush_buffer)
-                                       ld->flush_buffer(tty);
-                               /* fall through */
-                       case TCOFLUSH:
-diff --git a/drivers/media/video/cx88/cx88-video.c b/drivers/media/video/cx88/cx88-video.c
---- a/drivers/media/video/cx88/cx88-video.c
-+++ b/drivers/media/video/cx88/cx88-video.c
-@@ -261,7 +261,7 @@ static struct cx88_ctrl cx8800_ctls[] = 
-                       .default_value = 0,
-                       .type          = V4L2_CTRL_TYPE_INTEGER,
-               },
--              .off                   = 0,
-+              .off                   = 128,
-               .reg                   = MO_HUE,
-               .mask                  = 0x00ff,
-               .shift                 = 0,
-diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
---- a/drivers/net/e1000/e1000_main.c
-+++ b/drivers/net/e1000/e1000_main.c
-@@ -2307,6 +2307,7 @@ e1000_xmit_frame(struct sk_buff *skb, st
-       tso = e1000_tso(adapter, skb);
-       if (tso < 0) {
-               dev_kfree_skb_any(skb);
-+              spin_unlock_irqrestore(&adapter->tx_lock, flags);
-               return NETDEV_TX_OK;
-       }
- 
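
The e1000 hunk above fixes a classic lock leak: the early return on the TSO
error path left adapter->tx_lock held. One defensive pattern that avoids
this whole class of bug is to funnel every exit through a single unlock
site; a sketch with hypothetical names, not the driver's actual code:

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>

static int xmit_sketch(struct sk_buff *skb, struct example_adapter *adapter)
{
	unsigned long flags;
	int ret = NETDEV_TX_OK;

	spin_lock_irqsave(&adapter->tx_lock, flags);
	if (example_prepare(adapter, skb) < 0) {
		dev_kfree_skb_any(skb);
		goto out_unlock;	/* error path releases the lock too */
	}
	example_transmit(adapter, skb);
out_unlock:
	spin_unlock_irqrestore(&adapter->tx_lock, flags);
	return ret;
}
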
-diff --git a/drivers/net/hamradio/Kconfig b/drivers/net/hamradio/Kconfig
---- a/drivers/net/hamradio/Kconfig
-+++ b/drivers/net/hamradio/Kconfig
-@@ -17,7 +17,7 @@ config MKISS
- 
- config 6PACK
-       tristate "Serial port 6PACK driver"
--      depends on AX25 && BROKEN_ON_SMP
-+      depends on AX25
-       ---help---
-         6pack is a transmission protocol for the data exchange between your
-         PC and your TNC (the Terminal Node Controller acts as a kind of
-diff --git a/drivers/net/shaper.c b/drivers/net/shaper.c
---- a/drivers/net/shaper.c
-+++ b/drivers/net/shaper.c
-@@ -135,10 +135,8 @@ static int shaper_start_xmit(struct sk_b
- {
-       struct shaper *shaper = dev->priv;
-       struct sk_buff *ptr;
--   
--      if (down_trylock(&shaper->sem))
--              return -1;
- 
-+      spin_lock(&shaper->lock);
-       ptr=shaper->sendq.prev;
-       
-       /*
-@@ -232,7 +230,7 @@ static int shaper_start_xmit(struct sk_b
-                 shaper->stats.collisions++;
-       }
-       shaper_kick(shaper);
--      up(&shaper->sem);
-+      spin_unlock(&shaper->lock);
-       return 0;
- }
- 
-@@ -271,11 +269,9 @@ static void shaper_timer(unsigned long d
- {
-       struct shaper *shaper = (struct shaper *)data;
- 
--      if (!down_trylock(&shaper->sem)) {
--              shaper_kick(shaper);
--              up(&shaper->sem);
--      } else
--              mod_timer(&shaper->timer, jiffies);
-+      spin_lock(&shaper->lock);
-+      shaper_kick(shaper);
-+      spin_unlock(&shaper->lock);
- }
- 
- /*
-@@ -332,21 +328,6 @@ static void shaper_kick(struct shaper *s
- 
- 
- /*
-- *    Flush the shaper queues on a closedown
-- */
-- 
--static void shaper_flush(struct shaper *shaper)
--{
--      struct sk_buff *skb;
--
--      down(&shaper->sem);
--      while((skb=skb_dequeue(&shaper->sendq))!=NULL)
--              dev_kfree_skb(skb);
--      shaper_kick(shaper);
--      up(&shaper->sem);
--}
--
--/*
-  *    Bring the interface up. We just disallow this until a 
-  *    bind.
-  */
-@@ -375,7 +356,15 @@ static int shaper_open(struct net_device
- static int shaper_close(struct net_device *dev)
- {
-       struct shaper *shaper=dev->priv;
--      shaper_flush(shaper);
-+      struct sk_buff *skb;
-+
-+      while ((skb = skb_dequeue(&shaper->sendq)) != NULL)
-+              dev_kfree_skb(skb);
-+
-+      spin_lock_bh(&shaper->lock);
-+      shaper_kick(shaper);
-+      spin_unlock_bh(&shaper->lock);
-+
-       del_timer_sync(&shaper->timer);
-       return 0;
- }
-@@ -576,6 +565,7 @@ static void shaper_init_priv(struct net_
-       init_timer(&sh->timer);
-       sh->timer.function=shaper_timer;
-       sh->timer.data=(unsigned long)sh;
-+      spin_lock_init(&sh->lock);
- }
- 
- /*
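
The shaper conversion above swaps a trylock'd semaphore for a spinlock: the
timer no longer needs to re-arm itself when it loses the race, and process
context takes the _bh variants so the timer softirq cannot run against it
on the same CPU. The resulting pattern, sketched using the names from the
diff (a sketch, not the full driver code):

static void timer_sketch(unsigned long data)	/* softirq context */
{
	struct shaper *shaper = (struct shaper *)data;

	spin_lock(&shaper->lock);	/* plain lock: already in softirq */
	shaper_kick(shaper);
	spin_unlock(&shaper->lock);
}

static int close_sketch(struct net_device *dev)	/* process context */
{
	struct shaper *shaper = dev->priv;

	spin_lock_bh(&shaper->lock);	/* keep the timer out meanwhile */
	shaper_kick(shaper);
	spin_unlock_bh(&shaper->lock);
	return 0;
}
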
-diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
---- a/drivers/pci/pci-driver.c
-+++ b/drivers/pci/pci-driver.c
-@@ -396,7 +396,7 @@ int pci_register_driver(struct pci_drive
-       /* FIXME, once all of the existing PCI drivers have been fixed to set
-        * the pci shutdown function, this test can go away. */
-       if (!drv->driver.shutdown)
--              drv->driver.shutdown = pci_device_shutdown,
-+              drv->driver.shutdown = pci_device_shutdown;
-       drv->driver.owner = drv->owner;
-       drv->driver.kobj.ktype = &pci_driver_kobj_type;
-       pci_init_dynids(&drv->dynids);
-diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
---- a/drivers/scsi/qla2xxx/qla_init.c
-+++ b/drivers/scsi/qla2xxx/qla_init.c
-@@ -1914,9 +1914,11 @@ qla2x00_reg_remote_port(scsi_qla_host_t 
-               rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
- 
-       fcport->rport = rport = fc_remote_port_add(ha->host, 0, &rport_ids);
--      if (!rport)
-+      if (!rport) {
-               qla_printk(KERN_WARNING, ha,
-                   "Unable to allocate fc remote port!\n");
-+              return;
-+      }
- 
-       if (rport->scsi_target_id != -1 && rport->scsi_target_id < MAX_TARGETS)
-               fcport->os_target_id = rport->scsi_target_id;
-diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
---- a/drivers/scsi/qla2xxx/qla_os.c
-+++ b/drivers/scsi/qla2xxx/qla_os.c
-@@ -1150,7 +1150,7 @@ iospace_error_exit:
-  */
- int qla2x00_probe_one(struct pci_dev *pdev, struct qla_board_info *brd_info)
- {
--      int     ret;
-+      int     ret = -ENODEV;
-       device_reg_t __iomem *reg;
-       struct Scsi_Host *host;
-       scsi_qla_host_t *ha;
-@@ -1161,7 +1161,7 @@ int qla2x00_probe_one(struct pci_dev *pd
-       fc_port_t *fcport;
- 
-       if (pci_enable_device(pdev))
--              return -1;
-+              goto probe_out;
- 
-       host = scsi_host_alloc(&qla2x00_driver_template,
-           sizeof(scsi_qla_host_t));
-@@ -1183,9 +1183,8 @@ int qla2x00_probe_one(struct pci_dev *pd
- 
-       /* Configure PCI I/O space */
-       ret = qla2x00_iospace_config(ha);
--      if (ret != 0) {
--              goto probe_alloc_failed;
--      }
-+      if (ret)
-+              goto probe_failed;
- 
-       /* Sanitize the information from PCI BIOS. */
-       host->irq = pdev->irq;
-@@ -1258,23 +1257,10 @@ int qla2x00_probe_one(struct pci_dev *pd
-               qla_printk(KERN_WARNING, ha,
-                   "[ERROR] Failed to allocate memory for adapter\n");
- 
--              goto probe_alloc_failed;
-+              ret = -ENOMEM;
-+              goto probe_failed;
-       }
- 
--      pci_set_drvdata(pdev, ha);
--      host->this_id = 255;
--      host->cmd_per_lun = 3;
--      host->unique_id = ha->instance;
--      host->max_cmd_len = MAX_CMDSZ;
--      host->max_channel = ha->ports - 1;
--      host->max_id = ha->max_targets;
--      host->max_lun = ha->max_luns;
--      host->transportt = qla2xxx_transport_template;
--      if (scsi_add_host(host, &pdev->dev))
--              goto probe_alloc_failed;
--
--      qla2x00_alloc_sysfs_attr(ha);
--
-       if (qla2x00_initialize_adapter(ha) &&
-           !(ha->device_flags & DFLG_NO_CABLE)) {
- 
-@@ -1285,11 +1271,10 @@ int qla2x00_probe_one(struct pci_dev *pd
-                   "Adapter flags %x.\n",
-                   ha->host_no, ha->device_flags));
- 
-+              ret = -ENODEV;
-               goto probe_failed;
-       }
- 
--      qla2x00_init_host_attr(ha);
--
-       /*
-        * Startup the kernel thread for this host adapter
-        */
-@@ -1299,17 +1284,26 @@ int qla2x00_probe_one(struct pci_dev *pd
-               qla_printk(KERN_WARNING, ha,
-                   "Unable to start DPC thread!\n");
- 
-+              ret = -ENODEV;
-               goto probe_failed;
-       }
-       wait_for_completion(&ha->dpc_inited);
- 
-+      host->this_id = 255;
-+      host->cmd_per_lun = 3;
-+      host->unique_id = ha->instance;
-+      host->max_cmd_len = MAX_CMDSZ;
-+      host->max_channel = ha->ports - 1;
-+      host->max_lun = MAX_LUNS;
-+      host->transportt = qla2xxx_transport_template;
-+
-       if (IS_QLA2100(ha) || IS_QLA2200(ha))
-               ret = request_irq(host->irq, qla2100_intr_handler,
-                   SA_INTERRUPT|SA_SHIRQ, ha->brd_info->drv_name, ha);
-       else
-               ret = request_irq(host->irq, qla2300_intr_handler,
-                   SA_INTERRUPT|SA_SHIRQ, ha->brd_info->drv_name, ha);
--      if (ret != 0) {
-+      if (ret) {
-               qla_printk(KERN_WARNING, ha,
-                   "Failed to reserve interrupt %d already in use.\n",
-                   host->irq);
-@@ -1363,9 +1357,18 @@ int qla2x00_probe_one(struct pci_dev *pd
-               msleep(10);
-       }
- 
-+      pci_set_drvdata(pdev, ha);
-       ha->flags.init_done = 1;
-       num_hosts++;
- 
-+      ret = scsi_add_host(host, &pdev->dev);
-+      if (ret)
-+              goto probe_failed;
-+
-+      qla2x00_alloc_sysfs_attr(ha);
-+
-+      qla2x00_init_host_attr(ha);
-+
-       qla_printk(KERN_INFO, ha, "\n"
-           " QLogic Fibre Channel HBA Driver: %s\n"
-           "  QLogic %s - %s\n"
-@@ -1384,9 +1387,6 @@ int qla2x00_probe_one(struct pci_dev *pd
- probe_failed:
-       fc_remove_host(ha->host);
- 
--      scsi_remove_host(host);
--
--probe_alloc_failed:
-       qla2x00_free_device(ha);
- 
-       scsi_host_put(host);
-@@ -1394,7 +1394,8 @@ probe_alloc_failed:
- probe_disable_device:
-       pci_disable_device(pdev);
- 
--      return -1;
-+probe_out:
-+      return ret;
- }
- EXPORT_SYMBOL_GPL(qla2x00_probe_one);
- 
-diff --git a/fs/bio.c b/fs/bio.c
---- a/fs/bio.c
-+++ b/fs/bio.c
-@@ -261,6 +261,7 @@ inline void __bio_clone(struct bio *bio,
-        */
-       bio->bi_vcnt = bio_src->bi_vcnt;
-       bio->bi_size = bio_src->bi_size;
-+      bio->bi_idx = bio_src->bi_idx;
-       bio_phys_segments(q, bio);
-       bio_hw_segments(q, bio);
- }
-diff --git a/fs/char_dev.c b/fs/char_dev.c
---- a/fs/char_dev.c
-+++ b/fs/char_dev.c
-@@ -139,7 +139,7 @@ __unregister_chrdev_region(unsigned majo
-       struct char_device_struct *cd = NULL, **cp;
-       int i = major_to_index(major);
- 
--      up(&chrdevs_lock);
-+      down(&chrdevs_lock);
-       for (cp = &chrdevs[i]; *cp; cp = &(*cp)->next)
-               if ((*cp)->major == major &&
-                   (*cp)->baseminor == baseminor &&
-diff --git a/fs/exec.c b/fs/exec.c
---- a/fs/exec.c
-+++ b/fs/exec.c
-@@ -649,6 +649,7 @@ static inline int de_thread(struct task_
-       }
-       sig->group_exit_task = NULL;
-       sig->notify_count = 0;
-+      sig->real_timer.data = (unsigned long)current;
-       spin_unlock_irq(lock);
- 
-       /*
-diff --git a/fs/isofs/compress.c b/fs/isofs/compress.c
---- a/fs/isofs/compress.c
-+++ b/fs/isofs/compress.c
-@@ -129,8 +129,14 @@ static int zisofs_readpage(struct file *
-       cend = le32_to_cpu(*(__le32 *)(bh->b_data + (blockendptr & bufmask)));
-       brelse(bh);
- 
-+      if (cstart > cend)
-+              goto eio;
-+              
-       csize = cend-cstart;
- 
-+      if (csize > deflateBound(1UL << zisofs_block_shift))
-+              goto eio;
-+
-       /* Now page[] contains an array of pages, any of which can be NULL,
-          and the locks on which we hold.  We should now read the data and
-          release the pages.  If the pages are NULL the decompressed data
-diff --git a/include/asm-i386/string.h b/include/asm-i386/string.h
---- a/include/asm-i386/string.h
-+++ b/include/asm-i386/string.h
-@@ -116,7 +116,8 @@ __asm__ __volatile__(
-       "orb $1,%%al\n"
-       "3:"
-       :"=a" (__res), "=&S" (d0), "=&D" (d1)
--                   :"1" (cs),"2" (ct));
-+      :"1" (cs),"2" (ct)
-+      :"memory");
- return __res;
- }
- 
-@@ -138,8 +139,9 @@ __asm__ __volatile__(
-       "3:\tsbbl %%eax,%%eax\n\t"
-       "orb $1,%%al\n"
-       "4:"
--                   :"=a" (__res), "=&S" (d0), "=&D" (d1), "=&c" (d2)
--                   :"1" (cs),"2" (ct),"3" (count));
-+      :"=a" (__res), "=&S" (d0), "=&D" (d1), "=&c" (d2)
-+      :"1" (cs),"2" (ct),"3" (count)
-+      :"memory");
- return __res;
- }
- 
-@@ -158,7 +160,9 @@ __asm__ __volatile__(
-       "movl $1,%1\n"
-       "2:\tmovl %1,%0\n\t"
-       "decl %0"
--      :"=a" (__res), "=&S" (d0) : "1" (s),"0" (c));
-+      :"=a" (__res), "=&S" (d0)
-+      :"1" (s),"0" (c)
-+      :"memory");
- return __res;
- }
- 
-@@ -175,7 +179,9 @@ __asm__ __volatile__(
-       "leal -1(%%esi),%0\n"
-       "2:\ttestb %%al,%%al\n\t"
-       "jne 1b"
--      :"=g" (__res), "=&S" (d0), "=&a" (d1) :"0" (0),"1" (s),"2" (c));
-+      :"=g" (__res), "=&S" (d0), "=&a" (d1)
-+      :"0" (0),"1" (s),"2" (c)
-+      :"memory");
- return __res;
- }
- 
-@@ -189,7 +195,9 @@ __asm__ __volatile__(
-       "scasb\n\t"
-       "notl %0\n\t"
-       "decl %0"
--      :"=c" (__res), "=&D" (d0) :"1" (s),"a" (0), "0" (0xffffffffu));
-+      :"=c" (__res), "=&D" (d0)
-+      :"1" (s),"a" (0), "0" (0xffffffffu)
-+      :"memory");
- return __res;
- }
- 
-@@ -333,7 +341,9 @@ __asm__ __volatile__(
-       "je 1f\n\t"
-       "movl $1,%0\n"
-       "1:\tdecl %0"
--      :"=D" (__res), "=&c" (d0) : "a" (c),"0" (cs),"1" (count));
-+      :"=D" (__res), "=&c" (d0)
-+      :"a" (c),"0" (cs),"1" (count)
-+      :"memory");
- return __res;
- }
- 
-@@ -369,7 +379,7 @@ __asm__ __volatile__(
-       "je 2f\n\t"
-       "stosb\n"
-       "2:"
--      : "=&c" (d0), "=&D" (d1)
-+      :"=&c" (d0), "=&D" (d1)
-       :"a" (c), "q" (count), "0" (count/4), "1" ((long) s)
-       :"memory");
- return (s);   
-@@ -392,7 +402,8 @@ __asm__ __volatile__(
-       "jne 1b\n"
-       "3:\tsubl %2,%0"
-       :"=a" (__res), "=&d" (d0)
--      :"c" (s),"1" (count));
-+      :"c" (s),"1" (count)
-+      :"memory");
- return __res;
- }
- /* end of additional stuff */
-@@ -473,7 +484,8 @@ static inline void * memscan(void * addr
-               "dec %%edi\n"
-               "1:"
-               : "=D" (addr), "=c" (size)
--              : "0" (addr), "1" (size), "a" (c));
-+              : "0" (addr), "1" (size), "a" (c)
-+              : "memory");
-       return addr;
- }
- 
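
Every string.h hunk above adds a "memory" clobber. Without it, GCC may keep
values cached in registers across the asm or move stores past it, because
the register constraints alone do not say that the asm touches memory
through its pointer operands. A minimal standalone sketch of the rule
(hypothetical function, GCC on i386/x86):

static inline int first_byte(const char *s)
{
	int res;

	/* The asm reads *s through a register operand; "memory" forces GCC
	 * to flush pending stores to the string first and to reload any
	 * cached memory values afterwards. */
	__asm__ __volatile__(
		"movzbl (%1),%0"
		: "=r" (res)
		: "r" (s)
		: "memory");
	return res;
}
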
-diff --git a/include/asm-x86_64/smp.h b/include/asm-x86_64/smp.h
---- a/include/asm-x86_64/smp.h
-+++ b/include/asm-x86_64/smp.h
-@@ -46,6 +46,8 @@ extern int pic_mode;
- extern int smp_num_siblings;
- extern void smp_flush_tlb(void);
- extern void smp_message_irq(int cpl, void *dev_id, struct pt_regs *regs);
-+extern int smp_call_function_single (int cpuid, void (*func) (void *info), void *info,
-+                                   int retry, int wait);
- extern void smp_send_reschedule(int cpu);
- extern void smp_invalidate_rcv(void);         /* Process an NMI */
- extern void zap_low_mappings(void);
-diff --git a/include/linux/if_shaper.h b/include/linux/if_shaper.h
---- a/include/linux/if_shaper.h
-+++ b/include/linux/if_shaper.h
-@@ -23,7 +23,7 @@ struct shaper
-       __u32 shapeclock;
-       unsigned long recovery; /* Time we can next clock a packet out on
-                                  an empty queue */
--      struct semaphore sem;
-+      spinlock_t lock;
-         struct net_device_stats stats;
-       struct net_device *dev;
-       int  (*hard_start_xmit) (struct sk_buff *skb,
-diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
---- a/include/linux/skbuff.h
-+++ b/include/linux/skbuff.h
-@@ -1192,7 +1192,7 @@ static inline void *skb_header_pointer(c
- {
-       int hlen = skb_headlen(skb);
- 
--      if (offset + len <= hlen)
-+      if (hlen - offset >= len)
-               return skb->data + offset;
- 
-       if (skb_copy_bits(skb, offset, buffer, len) < 0)
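
The skb_header_pointer() change above is an overflow fix: with int
arithmetic, offset + len can overflow (undefined behaviour that wraps
negative on common targets), letting a huge len slip past the old test,
whereas hlen - offset >= len stays in range whenever offset <= hlen. A
standalone demonstration of the two tests:

#include <limits.h>
#include <stdio.h>

int main(void)
{
	int hlen = 100, offset = 4, len = INT_MAX;

	/* old test: offset + len wraps negative on typical targets, so
	 * the bogus request wrongly passes */
	printf("old: %d\n", offset + len <= hlen);	/* prints 1 */

	/* new test: no overflow, the request is correctly rejected */
	printf("new: %d\n", hlen - offset >= len);	/* prints 0 */
	return 0;
}
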
-diff --git a/include/linux/zlib.h b/include/linux/zlib.h
---- a/include/linux/zlib.h
-+++ b/include/linux/zlib.h
-@@ -506,6 +506,11 @@ extern int zlib_deflateReset (z_streamp 
-    stream state was inconsistent (such as zalloc or state being NULL).
- */
- 
-+static inline unsigned long deflateBound(unsigned long s)
-+{
-+      return s + ((s + 7) >> 3) + ((s + 63) >> 6) + 11;
-+}
-+
- extern int zlib_deflateParams (z_streamp strm, int level, int strategy);
- /*
-      Dynamically update the compression level and compression strategy.  The
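
The deflateBound() helper added above is a conservative upper bound on the
size of deflate output for s input bytes; the isofs/compress.c hunk earlier
in this patch uses it to reject corrupt zisofs extents whose recorded
compressed size exceeds the bound. A quick standalone check of the
arithmetic:

#include <stdio.h>

/* The helper introduced above, reproduced for a userspace check. */
static unsigned long deflateBound(unsigned long s)
{
	return s + ((s + 7) >> 3) + ((s + 63) >> 6) + 11;
}

int main(void)
{
	/* e.g. 64 KB of input can deflate to at most 74763 bytes, so any
	 * recorded compressed size above that must be corrupt */
	printf("%lu\n", deflateBound(1UL << 16));	/* prints 74763 */
	return 0;
}
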
-diff --git a/kernel/module.c b/kernel/module.c
---- a/kernel/module.c
-+++ b/kernel/module.c
-@@ -249,13 +249,18 @@ static inline unsigned int block_size(in
- /* Created by linker magic */
- extern char __per_cpu_start[], __per_cpu_end[];
- 
--static void *percpu_modalloc(unsigned long size, unsigned long align)
-+static void *percpu_modalloc(unsigned long size, unsigned long align,
-+                           const char *name)
- {
-       unsigned long extra;
-       unsigned int i;
-       void *ptr;
- 
--      BUG_ON(align > SMP_CACHE_BYTES);
-+      if (align > SMP_CACHE_BYTES) {
-+              printk(KERN_WARNING "%s: per-cpu alignment %li > %i\n",
-+                     name, align, SMP_CACHE_BYTES);
-+              align = SMP_CACHE_BYTES;
-+      }
- 
-       ptr = __per_cpu_start;
-       for (i = 0; i < pcpu_num_used; ptr += block_size(pcpu_size[i]), i++) {
-@@ -347,7 +352,8 @@ static int percpu_modinit(void)
- }     
- __initcall(percpu_modinit);
- #else /* ... !CONFIG_SMP */
--static inline void *percpu_modalloc(unsigned long size, unsigned long align)
-+static inline void *percpu_modalloc(unsigned long size, unsigned long align,
-+                                  const char *name)
- {
-       return NULL;
- }
-@@ -1554,7 +1560,8 @@ static struct module *load_module(void _
-       if (pcpuindex) {
-               /* We have a special allocation for this section. */
-               percpu = percpu_modalloc(sechdrs[pcpuindex].sh_size,
--                                       sechdrs[pcpuindex].sh_addralign);
-+                                       sechdrs[pcpuindex].sh_addralign,
-+                                       mod->name);
-               if (!percpu) {
-                       err = -ENOMEM;
-                       goto free_mod;
-diff --git a/lib/inflate.c b/lib/inflate.c
---- a/lib/inflate.c
-+++ b/lib/inflate.c
-@@ -326,7 +326,7 @@ DEBG("huft1 ");
-   {
-     *t = (struct huft *)NULL;
-     *m = 0;
--    return 0;
-+    return 2;
-   }
- 
- DEBG("huft2 ");
-@@ -374,6 +374,7 @@ DEBG("huft5 ");
-     if ((j = *p++) != 0)
-       v[x[j]++] = i;
-   } while (++i < n);
-+  n = x[g];                   /* set n to length of v */
- 
- DEBG("h6 ");
- 
-@@ -410,12 +411,13 @@ DEBG1("1 ");
- DEBG1("2 ");
-           f -= a + 1;           /* deduct codes from patterns left */
-           xp = c + k;
--          while (++j < z)       /* try smaller tables up to z bits */
--          {
--            if ((f <<= 1) <= *++xp)
--              break;            /* enough codes to use up j bits */
--            f -= *xp;           /* else deduct codes from patterns */
--          }
-+          if (j < z)
-+            while (++j < z)       /* try smaller tables up to z bits */
-+            {
-+              if ((f <<= 1) <= *++xp)
-+                break;            /* enough codes to use up j bits */
-+              f -= *xp;           /* else deduct codes from patterns */
-+            }
-         }
- DEBG1("3 ");
-         z = 1 << j;             /* table entries for j-bit table */
-diff --git a/lib/zlib_inflate/inftrees.c b/lib/zlib_inflate/inftrees.c
---- a/lib/zlib_inflate/inftrees.c
-+++ b/lib/zlib_inflate/inftrees.c
-@@ -141,7 +141,7 @@ static int huft_build(
-   {
-     *t = NULL;
-     *m = 0;
--    return Z_OK;
-+    return Z_DATA_ERROR;
-   }
- 
- 
-diff --git a/mm/memory.c b/mm/memory.c
---- a/mm/memory.c
-+++ b/mm/memory.c
-@@ -1164,7 +1164,7 @@ int remap_pfn_range(struct vm_area_struc
- {
-       pgd_t *pgd;
-       unsigned long next;
--      unsigned long end = addr + size;
-+      unsigned long end = addr + PAGE_ALIGN(size);
-       struct mm_struct *mm = vma->vm_mm;
-       int err;
- 
-diff --git a/mm/mempolicy.c b/mm/mempolicy.c
---- a/mm/mempolicy.c
-+++ b/mm/mempolicy.c
-@@ -409,7 +409,7 @@ asmlinkage long sys_set_mempolicy(int mo
-       struct mempolicy *new;
-       DECLARE_BITMAP(nodes, MAX_NUMNODES);
- 
--      if (mode > MPOL_MAX)
-+      if (mode < 0 || mode > MPOL_MAX)
-               return -EINVAL;
-       err = get_nodes(nodes, nmask, maxnode, mode);
-       if (err)
-diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
---- a/net/8021q/vlan.c
-+++ b/net/8021q/vlan.c
-@@ -578,6 +578,14 @@ static int vlan_device_event(struct noti
-                       if (!vlandev)
-                               continue;
- 
-+                      if (netif_carrier_ok(dev)) {
-+                              if (!netif_carrier_ok(vlandev))
-+                                      netif_carrier_on(vlandev);
-+                      } else {
-+                              if (netif_carrier_ok(vlandev))
-+                                      netif_carrier_off(vlandev);
-+                      }
-+
-                       if ((vlandev->state & VLAN_LINK_STATE_MASK) != flgs) {
-                               vlandev->state = (vlandev->state &~ VLAN_LINK_STATE_MASK)
-                                       | flgs;
-diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
---- a/net/ipv4/ip_output.c
-+++ b/net/ipv4/ip_output.c
-@@ -111,7 +111,6 @@ static int ip_dev_loopback_xmit(struct s
- #ifdef CONFIG_NETFILTER_DEBUG
-       nf_debug_ip_loopback_xmit(newskb);
- #endif
--      nf_reset(newskb);
-       netif_rx(newskb);
-       return 0;
- }
-@@ -196,8 +195,6 @@ static inline int ip_finish_output2(stru
-       nf_debug_ip_finish_output2(skb);
- #endif /*CONFIG_NETFILTER_DEBUG*/
- 
--      nf_reset(skb);
--
-       if (hh) {
-               int hh_alen;
- 
-diff --git a/net/ipv4/netfilter/ip_conntrack_core.c b/net/ipv4/netfilter/ip_conntrack_core.c
---- a/net/ipv4/netfilter/ip_conntrack_core.c
-+++ b/net/ipv4/netfilter/ip_conntrack_core.c
-@@ -1124,6 +1124,9 @@ void ip_conntrack_cleanup(void)
-               schedule();
-               goto i_see_dead_people;
-       }
-+      /* wait until all references to ip_conntrack_untracked are dropped */
-+      while (atomic_read(&ip_conntrack_untracked.ct_general.use) > 1)
-+              schedule();
- 
-       kmem_cache_destroy(ip_conntrack_cachep);
-       kmem_cache_destroy(ip_conntrack_expect_cachep);
-diff --git a/net/ipv4/netfilter/ip_conntrack_standalone.c b/net/ipv4/netfilter/ip_conntrack_standalone.c
---- a/net/ipv4/netfilter/ip_conntrack_standalone.c
-+++ b/net/ipv4/netfilter/ip_conntrack_standalone.c
-@@ -432,6 +432,13 @@ static unsigned int ip_conntrack_defrag(
-                                       const struct net_device *out,
-                                       int (*okfn)(struct sk_buff *))
- {
-+#if !defined(CONFIG_IP_NF_NAT) && !defined(CONFIG_IP_NF_NAT_MODULE)
-+      /* Previously seen (loopback)?  Ignore.  Do this before
-+           fragment check. */
-+      if ((*pskb)->nfct)
-+              return NF_ACCEPT;
-+#endif
-+
-       /* Gather fragments. */
-       if ((*pskb)->nh.iph->frag_off & htons(IP_MF|IP_OFFSET)) {
-               *pskb = ip_ct_gather_frags(*pskb,
-diff --git a/net/ipv4/netfilter/ip_nat_proto_tcp.c b/net/ipv4/netfilter/ip_nat_proto_tcp.c
---- a/net/ipv4/netfilter/ip_nat_proto_tcp.c
-+++ b/net/ipv4/netfilter/ip_nat_proto_tcp.c
-@@ -40,7 +40,8 @@ tcp_unique_tuple(struct ip_conntrack_tup
-                enum ip_nat_manip_type maniptype,
-                const struct ip_conntrack *conntrack)
- {
--      static u_int16_t port, *portptr;
-+      static u_int16_t port;
-+      u_int16_t *portptr;
-       unsigned int range_size, min, i;
- 
-       if (maniptype == IP_NAT_MANIP_SRC)
-diff --git a/net/ipv4/netfilter/ip_nat_proto_udp.c b/net/ipv4/netfilter/ip_nat_proto_udp.c
---- a/net/ipv4/netfilter/ip_nat_proto_udp.c
-+++ b/net/ipv4/netfilter/ip_nat_proto_udp.c
-@@ -41,7 +41,8 @@ udp_unique_tuple(struct ip_conntrack_tup
-                enum ip_nat_manip_type maniptype,
-                const struct ip_conntrack *conntrack)
- {
--      static u_int16_t port, *portptr;
-+      static u_int16_t port;
-+      u_int16_t *portptr;
-       unsigned int range_size, min, i;
- 
-       if (maniptype == IP_NAT_MANIP_SRC)
-diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c
---- a/net/ipv6/netfilter/ip6_queue.c
-+++ b/net/ipv6/netfilter/ip6_queue.c
-@@ -76,7 +76,9 @@ static DECLARE_MUTEX(ipqnl_sem);
- static void
- ipq_issue_verdict(struct ipq_queue_entry *entry, int verdict)
- {
-+      local_bh_disable();
-       nf_reinject(entry->skb, entry->info, verdict);
-+      local_bh_enable();
-       kfree(entry);
- }
- 
-diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
---- a/net/netlink/af_netlink.c
-+++ b/net/netlink/af_netlink.c
-@@ -315,8 +315,8 @@ err:
- static void netlink_remove(struct sock *sk)
- {
-       netlink_table_grab();
--      nl_table[sk->sk_protocol].hash.entries--;
--      sk_del_node_init(sk);
-+      if (sk_del_node_init(sk))
-+              nl_table[sk->sk_protocol].hash.entries--;
-       if (nlk_sk(sk)->groups)
-               __sk_del_bind_node(sk);
-       netlink_table_ungrab();
-@@ -429,7 +429,12 @@ retry:
-       err = netlink_insert(sk, pid);
-       if (err == -EADDRINUSE)
-               goto retry;
--      return 0;
-+
-+      /* If 2 threads race to autobind, that is fine.  */
-+      if (err == -EBUSY)
-+              err = 0;
-+
-+      return err;
- }
- 
- static inline int netlink_capable(struct socket *sock, unsigned int flag) 
-diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
---- a/net/packet/af_packet.c
-+++ b/net/packet/af_packet.c
-@@ -274,6 +274,9 @@ static int packet_rcv_spkt(struct sk_buf
-       dst_release(skb->dst);
-       skb->dst = NULL;
- 
-+      /* drop conntrack reference */
-+      nf_reset(skb);
-+
-       spkt = (struct sockaddr_pkt*)skb->cb;
- 
-       skb_push(skb, skb->data-skb->mac.raw);
-@@ -517,6 +520,9 @@ static int packet_rcv(struct sk_buff *sk
-       dst_release(skb->dst);
-       skb->dst = NULL;
- 
-+      /* drop conntrack reference */
-+      nf_reset(skb);
-+
-       spin_lock(&sk->sk_receive_queue.lock);
-       po->stats.tp_packets++;
-       __skb_queue_tail(&sk->sk_receive_queue, skb);
-diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
---- a/net/xfrm/xfrm_user.c
-+++ b/net/xfrm/xfrm_user.c
-@@ -1180,6 +1180,9 @@ static struct xfrm_policy *xfrm_compile_
-       if (nr > XFRM_MAX_DEPTH)
-               return NULL;
- 
-+      if (p->dir > XFRM_POLICY_OUT)
-+              return NULL;
-+
-       xp = xfrm_policy_alloc(GFP_KERNEL);
-       if (xp == NULL) {
-               *dir = -ENOBUFS;
-diff --git a/security/keys/keyring.c b/security/keys/keyring.c
---- a/security/keys/keyring.c
-+++ b/security/keys/keyring.c
-@@ -188,7 +188,11 @@ static void keyring_destroy(struct key *
- 
-       if (keyring->description) {
-               write_lock(&keyring_name_lock);
--              list_del(&keyring->type_data.link);
-+
-+              if (keyring->type_data.link.next != NULL &&
-+                  !list_empty(&keyring->type_data.link))
-+                      list_del(&keyring->type_data.link);
-+
-               write_unlock(&keyring_name_lock);
-       }
- 
-diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
---- a/security/keys/process_keys.c
-+++ b/security/keys/process_keys.c
-@@ -641,7 +641,7 @@ long join_session_keyring(const char *na
-               keyring = keyring_alloc(name, tsk->uid, tsk->gid, 0, NULL);
-               if (IS_ERR(keyring)) {
-                       ret = PTR_ERR(keyring);
--                      goto error;
-+                      goto error2;
-               }
-       }
-       else if (IS_ERR(keyring)) {

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 

