
[Xen-changelog] Merged.



# HG changeset patch
# User emellor@xxxxxxxxxxxxxxxxxxxxxx
# Node ID 4321438e92a7bc87d9d9d6c6be4624acb0280883
# Parent  7f8db234e9dbfaa1c1c812c02f9cddcc70184980
# Parent  e519f3239a978c708e56d5cf6ec554b8f6a6afb7
Merged.

diff -r 7f8db234e9db -r 4321438e92a7 linux-2.6-xen-sparse/arch/xen/i386/kernel/cpu/common.c
--- a/linux-2.6-xen-sparse/arch/xen/i386/kernel/cpu/common.c    Wed Nov  2 15:42:29 2005
+++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/cpu/common.c    Wed Nov  2 15:43:32 2005
@@ -520,7 +520,7 @@
                printk("\n");
 }
 
-cpumask_t cpu_initialized __initdata = CPU_MASK_NONE;
+cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;
 
 /* This is hacky. :)
  * We're emulating future behavior.
@@ -562,7 +562,7 @@
 #endif
 }
 
-void __init cpu_gdt_init(struct Xgt_desc_struct *gdt_descr)
+void __cpuinit cpu_gdt_init(struct Xgt_desc_struct *gdt_descr)
 {
        unsigned long frames[16];
        unsigned long va;
@@ -585,7 +585,7 @@
  * and IDT. We reload them nevertheless, this function acts as a
  * 'CPU state barrier', nothing should get across.
  */
-void __init cpu_init (void)
+void __cpuinit cpu_init (void)
 {
        int cpu = smp_processor_id();
        struct tss_struct * t = &per_cpu(init_tss, cpu);
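
The __init to __cpuinit (and __initdata to __cpuinitdata) changes above keep this bring-up code and data resident when CPU hotplug is enabled: a hot-added CPU has to run cpu_gdt_init() and cpu_init() long after boot-time .init memory has been freed. A minimal sketch of the convention, matching the definitions that the cpu-hotplug-init.patch at the end of this changeset backports into linux-2.6.12:

    /* With CONFIG_HOTPLUG_CPU the annotations expand to nothing, so the
     * code stays resident; without it they decay to __init/__initdata and
     * the sections are discarded after boot. */
    #ifdef CONFIG_HOTPLUG_CPU
    #define __cpuinit
    #define __cpuinitdata
    #else
    #define __cpuinit     __init
    #define __cpuinitdata __initdata
    #endif
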
diff -r 7f8db234e9db -r 4321438e92a7 linux-2.6-xen-sparse/arch/xen/kernel/smpboot.c
--- a/linux-2.6-xen-sparse/arch/xen/kernel/smpboot.c    Wed Nov  2 15:42:29 2005
+++ b/linux-2.6-xen-sparse/arch/xen/kernel/smpboot.c    Wed Nov  2 15:43:32 2005
@@ -191,10 +191,17 @@
        int cpu, rc;
        struct task_struct *idle;
 
-       if (max_cpus == 0)
-               return;
-
-       xen_smp_intr_init(0);
+       cpu_data[0] = boot_cpu_data;
+
+       cpu_2_logical_apicid[0] = 0;
+       x86_cpu_to_apicid[0] = 0;
+
+       current_thread_info()->cpu = 0;
+       cpu_sibling_map[0] = cpumask_of_cpu(0);
+       cpu_core_map[0]    = cpumask_of_cpu(0);
+
+       if (max_cpus != 0)
+               xen_smp_intr_init(0);
 
        for (cpu = 1; cpu < max_cpus; cpu++) {
                rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL);
@@ -229,16 +236,20 @@
                make_page_readonly((void *)cpu_gdt_descr[cpu].address);
 
                cpu_set(cpu, cpu_possible_map);
+#ifdef CONFIG_HOTPLUG_CPU
                if (xen_start_info->flags & SIF_INITDOMAIN)
                        cpu_set(cpu, cpu_present_map);
+#else
+               cpu_set(cpu, cpu_present_map);
+#endif
 
                vcpu_prepare(cpu);
        }
 
        /* Currently, Xen gives no dynamic NUMA/HT info. */
-       for (cpu = 0; cpu < NR_CPUS; cpu++) {
-               cpus_clear(cpu_sibling_map[cpu]);
-               cpus_clear(cpu_core_map[cpu]);
+       for (cpu = 1; cpu < NR_CPUS; cpu++) {
+               cpu_sibling_map[cpu] = cpumask_of_cpu(cpu);
+               cpu_core_map[cpu]    = cpumask_of_cpu(cpu);
        }
 
 #ifdef CONFIG_X86_IO_APIC
@@ -256,18 +267,9 @@
        cpu_possible_map = cpumask_of_cpu(0);
        cpu_present_map  = cpumask_of_cpu(0);
        cpu_online_map   = cpumask_of_cpu(0);
-
-       cpu_data[0] = boot_cpu_data;
-       cpu_2_logical_apicid[0] = 0;
-       x86_cpu_to_apicid[0] = 0;
-
-       current_thread_info()->cpu = 0;
-       cpus_clear(cpu_sibling_map[0]);
-       cpu_set(0, cpu_sibling_map[0]);
-
-       cpus_clear(cpu_core_map[0]);
-       cpu_set(0, cpu_core_map[0]);
-}
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
 
 static void vcpu_hotplug(unsigned int cpu)
 {
@@ -288,11 +290,7 @@
                cpu_set(cpu, cpu_present_map);
                (void)cpu_up(cpu);
        } else if (strcmp(state, "offline") == 0) {
-#ifdef CONFIG_HOTPLUG_CPU
                (void)cpu_down(cpu);
-#else
-               printk(KERN_INFO "Ignoring CPU%d hotplug request\n", cpu);
-#endif
        } else {
                printk(KERN_ERR "XENBUS: unknown state(%s) on CPU%d\n",
                       state, cpu);
@@ -341,8 +339,6 @@
 }
 
 subsys_initcall(setup_vcpu_hotplug_event);
-
-#ifdef CONFIG_HOTPLUG_CPU
 
 int __cpu_disable(void)
 {
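
The smpboot.c hunks consolidate the boot CPU's bookkeeping (cpu_data, APIC ids, thread info, sibling/core maps) at the top of the SMP bring-up path, where it now runs even when max_cpus is 0, and drop the duplicated boot-CPU setup from the second site further down. The #ifdef CONFIG_HOTPLUG_CPU fence now covers the whole vcpu_hotplug machinery rather than just the cpu_down() call, and the clear-then-set cpumask pairs become single assignments. A sketch of that idiom, assuming the 2.6.12-era cpumask API:

    /* cpumask_of_cpu(n) evaluates to a mask with only bit n set, so one
     * assignment replaces the old cpus_clear() + cpu_set() sequence. */
    cpu_sibling_map[cpu] = cpumask_of_cpu(cpu);
    cpu_core_map[cpu]    = cpumask_of_cpu(cpu);
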
diff -r 7f8db234e9db -r 4321438e92a7 linux-2.6-xen-sparse/arch/xen/x86_64/kernel/setup64.c
--- a/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/setup64.c     Wed Nov  2 15:42:29 2005
+++ b/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/setup64.c     Wed Nov  2 15:43:32 2005
@@ -35,7 +35,7 @@
 #endif
 char x86_boot_params[BOOT_PARAM_SIZE] __initdata = {0,};
 
-cpumask_t cpu_initialized __initdata = CPU_MASK_NONE;
+cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;
 
 struct x8664_pda cpu_pda[NR_CPUS] __cacheline_aligned; 
 
@@ -130,7 +130,7 @@
         xen_new_user_pt(__pa(init_level4_user_pgt));
 }
 
-void __init cpu_gdt_init(struct desc_ptr *gdt_descr)
+void __cpuinit cpu_gdt_init(struct desc_ptr *gdt_descr)
 {
        unsigned long frames[16];
        unsigned long va;
@@ -227,7 +227,7 @@
 #endif
 }
 
-void __init check_efer(void)
+void __cpuinit check_efer(void)
 {
        unsigned long efer;
 
@@ -244,7 +244,7 @@
  * 'CPU state barrier', nothing should get across.
  * A lot of state is already set up in PDA init.
  */
-void __init cpu_init (void)
+void __cpuinit cpu_init (void)
 {
 #ifdef CONFIG_SMP
        int cpu = stack_smp_processor_id();
diff -r 7f8db234e9db -r 4321438e92a7 linux-2.6-xen-sparse/drivers/xen/blkfront/blkfront.c
--- a/linux-2.6-xen-sparse/drivers/xen/blkfront/blkfront.c      Wed Nov  2 15:42:29 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/blkfront/blkfront.c      Wed Nov  2 15:43:32 2005
@@ -458,7 +458,7 @@
        node += strlen(watch->node);
 
        /* FIXME: clean up when error on the other end. */
-       if (info->connected == BLKIF_STATE_CONNECTED)
+       if ((info->connected == BLKIF_STATE_CONNECTED) || info->mi)
                return;
 
        err = xenbus_gather(NULL, watch->node,
diff -r 7f8db234e9db -r 4321438e92a7 tools/console/daemon/io.c
--- a/tools/console/daemon/io.c Wed Nov  2 15:42:29 2005
+++ b/tools/console/daemon/io.c Wed Nov  2 15:43:32 2005
@@ -380,12 +380,21 @@
        if (!buffer_empty(&d->buffer))
                return;
 
-       if (d->buffer.data)
+       if (d->buffer.data) {
                free(d->buffer.data);
-       d->buffer.data = NULL;
-       if (d->tty_fd != -1)
+               d->buffer.data = NULL;
+       }
+
+       if (d->tty_fd != -1) {
                close(d->tty_fd);
-       d->tty_fd = -1;
+               d->tty_fd = -1;
+       }
+
+       if (d->conspath) {
+               free(d->conspath);
+               d->conspath = NULL;
+       }
+
        remove_domain(d);
 }
 
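
The io.c teardown now pairs each release with its guard and resets the handle, and also frees d->conspath, which previously leaked. A hypothetical helper (not part of the daemon, illustration only) showing the same free-and-NULL idiom:

    #include <stdlib.h>

    /* Guarding and nulling makes teardown idempotent: a second call
     * becomes a no-op instead of a double free. */
    static void free_and_clear(char **p)
    {
            if (*p) {
                    free(*p);
                    *p = NULL;
            }
    }
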
diff -r 7f8db234e9db -r 4321438e92a7 tools/console/daemon/main.c
--- a/tools/console/daemon/main.c       Wed Nov  2 15:42:29 2005
+++ b/tools/console/daemon/main.c       Wed Nov  2 15:43:32 2005
@@ -30,10 +30,14 @@
 #include "utils.h"
 #include "io.h"
 
-void usage(char *prg)
+static void usage(char *name)
 {
-       fprintf(stderr, 
-               "usage: %s [-h] [-V] [-v] [-i]\n", prg);
+       printf("Usage: %s [-h] [-V] [-v] [-i]\n", name);
+}
+
+static void version(char *name)
+{
+       printf("Xen Console Daemon 3.0\n");
 }
 
 int main(int argc, char **argv)
@@ -58,7 +62,7 @@
                        usage(argv[0]);
                        exit(0);
                case 'V':
-                       //version(argv[0]);
+                       version(argv[0]);
                        exit(0);
                case 'v':
                        syslog_option |= LOG_PERROR;
diff -r 7f8db234e9db -r 4321438e92a7 tools/vtpm_manager/manager/vtpm_manager.c
--- a/tools/vtpm_manager/manager/vtpm_manager.c Wed Nov  2 15:42:29 2005
+++ b/tools/vtpm_manager/manager/vtpm_manager.c Wed Nov  2 15:43:32 2005
@@ -140,12 +140,15 @@
   TPM_AUTHDATA sharedsecret;
   
   TPMTRYRETURN( VTSP_OSAP(vtpm_globals->manager_tcs_handle,
-                         TPM_ET_SRK,
-                         0, 
+                         TPM_ET_KEYHANDLE,
+                         TPM_SRK_KEYHANDLE, 
                          (const TPM_AUTHDATA*)&vtpm_globals->srk_usage_auth,
                          &sharedsecret, 
                          &osap) ); 
-  
+
+  osap.fContinueAuthSession = FALSE;
+ 
+ 
   TPMTRYRETURN( VTSP_CreateWrapKey( vtpm_globals->manager_tcs_handle,
                                    TPM_KEY_BIND,
                                    (const TPM_AUTHDATA*)&vtpm_globals->storage_key_usage_auth,
diff -r 7f8db234e9db -r 4321438e92a7 tools/vtpm_manager/manager/vtsp.c
--- a/tools/vtpm_manager/manager/vtsp.c Wed Nov  2 15:42:29 2005
+++ b/tools/vtpm_manager/manager/vtsp.c Wed Nov  2 15:43:32 2005
@@ -180,8 +180,8 @@
   Crypto_GetRandom((BYTE *) &nonceOddOSAP, sizeof(TPM_NONCE) ); 
   
   TPMTRYRETURN( TCSP_OSAP(    hContext,
-                             TPM_ET_SRK,
-                             0, 
+                             entityType,
+                             entityValue, 
                              nonceOddOSAP,
                              &auth->AuthHandle, 
                              &auth->NonceEven, 
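
Taken together, the two TPM hunks stop hard-coding the SRK inside VTSP_OSAP: the entity type and value become parameters forwarded straight to TCSP_OSAP, and the caller in vtpm_manager.c names the SRK explicitly as a key handle (TPM_ET_KEYHANDLE / TPM_SRK_KEYHANDLE). Clearing osap.fContinueAuthSession asks the TPM to tear the session down once the wrap-key creation completes. A hedged usage sketch; some_key_handle and key_usage_auth are assumptions, everything else appears in the diff:

    /* Hypothetical second caller: with the entity parameterized, an OSAP
     * session can target any loaded key, not just the SRK. */
    TPMTRYRETURN( VTSP_OSAP(vtpm_globals->manager_tcs_handle,
                            TPM_ET_KEYHANDLE,
                            some_key_handle,                      /* assumed */
                            (const TPM_AUTHDATA*)&key_usage_auth, /* assumed */
                            &sharedsecret,
                            &osap) );
    osap.fContinueAuthSession = FALSE;  /* end the session after one use */
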
diff -r 7f8db234e9db -r 4321438e92a7 tools/vtpm_manager/util/buffer.h
--- a/tools/vtpm_manager/util/buffer.h  Wed Nov  2 15:42:29 2005
+++ b/tools/vtpm_manager/util/buffer.h  Wed Nov  2 15:43:32 2005
@@ -36,18 +36,6 @@
 
 #include <stddef.h>             // for pointer NULL
 #include "tcg.h"
-
-// structure to enable use of FMT_SIZE32_DATA in BSG_Unpack
-typedef struct pack_buf_t {
-  UINT32 size;
-  BYTE * data;
-} pack_buf_t;
-
-// and a const version for Pack
-typedef struct pack_constbuf_t {
-  UINT32 size;
-  const BYTE* data;
-} pack_constbuf_t;
 
 typedef UINT32 tpm_size_t;
 
diff -r 7f8db234e9db -r 4321438e92a7 tools/vtpm_manager/util/tcg.h
--- a/tools/vtpm_manager/util/tcg.h     Wed Nov  2 15:42:29 2005
+++ b/tools/vtpm_manager/util/tcg.h     Wed Nov  2 15:43:32 2005
@@ -190,6 +190,20 @@
   BOOL   fContinueAuthSession;
   TPM_AUTHDATA  HMAC;
 } TCS_AUTH;
+
+// structures for dealing with sizes followed by buffers in all the
+// TCG structure.
+typedef struct pack_buf_t {
+  UINT32 size;
+  BYTE * data;
+} pack_buf_t;
+
+typedef struct pack_constbuf_t {
+  UINT32 size;
+  const BYTE* data;
+} pack_constbuf_t;
+
+
 
 // **************************** CONSTANTS *********************************
 
diff -r 7f8db234e9db -r 4321438e92a7 tools/xenstore/xs.h
--- a/tools/xenstore/xs.h       Wed Nov  2 15:42:29 2005
+++ b/tools/xenstore/xs.h       Wed Nov  2 15:43:32 2005
@@ -136,7 +136,7 @@
  */
 bool xs_release_domain(struct xs_handle *h, unsigned int domid);
 
-/* Query the home path of a domain.
+/* Query the home path of a domain.  Call free() after use.
  */
 char *xs_get_domain_path(struct xs_handle *h, unsigned int domid);
 
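
The new comment documents an ownership contract: xs_get_domain_path() returns heap-allocated memory that the caller must release. A minimal usage sketch, assuming only what the header shows:

    #include <stdio.h>
    #include <stdlib.h>
    #include "xs.h"   /* tools/xenstore/xs.h */

    static void print_domain_path(struct xs_handle *h, unsigned int domid)
    {
            char *path = xs_get_domain_path(h, domid);   /* caller owns */
            if (path) {
                    printf("domain %u: %s\n", domid, path);
                    free(path);   /* per the new comment */
            }
    }
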
diff -r 7f8db234e9db -r 4321438e92a7 xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c     Wed Nov  2 15:42:29 2005
+++ b/xen/arch/x86/domain.c     Wed Nov  2 15:43:32 2005
@@ -408,6 +408,9 @@
         if ( !pagetable_get_paddr(d->arch.phys_table) )
             d->arch.phys_table = v->arch.guest_table;
 
+        /* Initialize monitor page table */
+        v->arch.monitor_table = mk_pagetable(0);
+
         vmx_final_setup_guest(v);
     }
 
diff -r 7f8db234e9db -r 4321438e92a7 xen/arch/x86/shadow.c
--- a/xen/arch/x86/shadow.c     Wed Nov  2 15:42:29 2005
+++ b/xen/arch/x86/shadow.c     Wed Nov  2 15:43:32 2005
@@ -1,19 +1,19 @@
 /******************************************************************************
- * arch/x86/shadow_64.c
- * 
+ * arch/x86/shadow.c
+ *
  * Copyright (c) 2005 Michael A Fetterman
  * Based on an earlier implementation by Ian Pratt et al
- * 
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation; either version 2 of the License, or
  * (at your option) any later version.
- * 
+ *
  * This program is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- * 
+ *
  * You should have received a copy of the GNU General Public License
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
@@ -55,7 +55,6 @@
     unsigned long va, unsigned int from, unsigned int to);
 static inline void validate_bl2e_change( struct domain *d,
     guest_root_pgentry_t *new_gle_p, pgentry_64_t *shadow_l3, int index);
-
 #endif
 
 /********
@@ -102,7 +101,6 @@
         return 1;
 #endif
         return 0;
-        
     }
 
     // To convert this page to use as a page table, the writable count
@@ -490,12 +488,12 @@
          * We could proactively fill in PDEs for pages that are already
          * shadowed *and* where the guest PDE has _PAGE_ACCESSED set
          * (restriction required for coherence of the accessed bit). However,
-         * we tried it and it didn't help performance. This is simpler. 
+         * we tried it and it didn't help performance. This is simpler.
          */
         memset(spl2e, 0, DOMAIN_ENTRIES_PER_L2_PAGETABLE*sizeof(l2_pgentry_t));
 
         /* Install hypervisor and 2x linear p.t. mapings. */
-        memcpy(&spl2e[DOMAIN_ENTRIES_PER_L2_PAGETABLE], 
+        memcpy(&spl2e[DOMAIN_ENTRIES_PER_L2_PAGETABLE],
                &idle_pg_table[DOMAIN_ENTRIES_PER_L2_PAGETABLE],
                HYPERVISOR_ENTRIES_PER_L2_PAGETABLE * sizeof(l2_pgentry_t));
 
@@ -522,7 +520,7 @@
             //
             if ( !get_shadow_ref(hl2mfn) )
                 BUG();
-            
+
             spl2e[l2_table_offset(LINEAR_PT_VIRT_START)] =
                 l2e_from_pfn(hl2mfn, __PAGE_HYPERVISOR);
         }
@@ -532,7 +530,7 @@
     }
     else
     {
-        memset(spl2e, 0, L2_PAGETABLE_ENTRIES*sizeof(l2_pgentry_t));        
+        memset(spl2e, 0, L2_PAGETABLE_ENTRIES*sizeof(l2_pgentry_t));
     }
 
     unmap_domain_page(spl2e);
@@ -543,7 +541,7 @@
 #endif
 
 static void shadow_map_l1_into_current_l2(unsigned long va)
-{ 
+{
     struct vcpu *v = current;
     struct domain *d = v->domain;
     l1_pgentry_t *spl1e;
@@ -596,7 +594,7 @@
 #if CONFIG_PAGING_LEVELS >=4
     if (d->arch.ops->guest_paging_levels == PAGING_L2)
     {
-        /* for 32-bit VMX guest on 64-bit host, 
+        /* for 32-bit VMX guest on 64-bit host,
          * need update two L2 entries each time
          */
         if ( !get_shadow_ref(sl1mfn))
@@ -624,7 +622,7 @@
         l1_pgentry_t sl1e;
         int index = guest_l1_table_offset(va);
         int min = 1, max = 0;
-        
+
         unsigned long entries, pt_va;
         l1_pgentry_t tmp_sl1e;
         guest_l1_pgentry_t tmp_gl1e;//Prepare for double compile
@@ -790,7 +788,7 @@
 
         /* Record the allocation block so it can be correctly freed later. */
         d->arch.out_of_sync_extras_count++;
-        *((struct out_of_sync_entry **)&extra[out_of_sync_extra_size]) = 
+        *((struct out_of_sync_entry **)&extra[out_of_sync_extra_size]) =
             d->arch.out_of_sync_extras;
         d->arch.out_of_sync_extras = &extra[0];
 
@@ -1020,7 +1018,7 @@
 {
     struct domain *d = v->domain;
 #if defined (__x86_64__)
-    unsigned long l2mfn = ((v->arch.flags & TF_kernel_mode)? 
+    unsigned long l2mfn = ((v->arch.flags & TF_kernel_mode)?
                           pagetable_get_pfn(v->arch.guest_table) :
                           pagetable_get_pfn(v->arch.guest_table_user));
 #else
@@ -1082,7 +1080,7 @@
         return 1;
 
     __guest_get_l2e(v, va, &l2e);
-    if ( !(guest_l2e_get_flags(l2e) & _PAGE_PRESENT) || 
+    if ( !(guest_l2e_get_flags(l2e) & _PAGE_PRESENT) ||
          (guest_l2e_get_flags(l2e) & _PAGE_PSE))
         return 0;
 
@@ -1155,7 +1153,7 @@
 }
 
 static int fix_entry(
-    struct domain *d, 
+    struct domain *d,
     l1_pgentry_t *pt, u32 *found, int is_l1_shadow, u32 max_refs_to_find)
 {
     l1_pgentry_t old = *pt;
@@ -1194,19 +1192,19 @@
     match = l1e_from_pfn(readonly_gmfn, flags);
 
     if ( shadow_mode_external(d) ) {
-        i = (frame_table[readonly_gmfn].u.inuse.type_info & PGT_va_mask) 
+        i = (frame_table[readonly_gmfn].u.inuse.type_info & PGT_va_mask)
             >> PGT_va_shift;
 
         if ( (i >= 0 && i <= L1_PAGETABLE_ENTRIES) &&
-             !l1e_has_changed(pt[i], match, flags) && 
+             !l1e_has_changed(pt[i], match, flags) &&
              fix_entry(d, &pt[i], &found, is_l1_shadow, max_refs_to_find) &&
              !prediction )
             goto out;
     }
- 
+
     for (i = 0; i < GUEST_L1_PAGETABLE_ENTRIES; i++)
     {
-        if ( unlikely(!l1e_has_changed(pt[i], match, flags)) && 
+        if ( unlikely(!l1e_has_changed(pt[i], match, flags)) &&
              fix_entry(d, &pt[i], &found, is_l1_shadow, max_refs_to_find) )
             break;
     }
@@ -1255,7 +1253,7 @@
     }
 
     if ( shadow_mode_external(d) ) {
-        if (write_refs-- == 0) 
+        if (write_refs-- == 0)
             return 0;
 
          // Use the back pointer to locate the shadow page that can contain
@@ -1275,7 +1273,7 @@
         a = &d->arch.shadow_ht[i];
         while ( a && a->gpfn_and_flags )
         {
-            if ( (a->gpfn_and_flags & PGT_type_mask) == PGT_l1_shadow 
+            if ( (a->gpfn_and_flags & PGT_type_mask) == PGT_l1_shadow
 #if CONFIG_PAGING_LEVELS >= 4
               || (a->gpfn_and_flags & PGT_type_mask) == PGT_fl1_shadow
 #endif
@@ -1384,10 +1382,10 @@
                 if ( (i < min_snapshot) || (i > max_snapshot) ||
                     guest_l1e_has_changed(guest1[i], snapshot1[i], PAGE_FLAG_MASK) )
                 {
-                    int error; 
+                    int error;
 
                     error = validate_pte_change(d, guest1[i], &shadow1[i]);
-                    if ( error ==  -1 ) 
+                    if ( error ==  -1 )
                         unshadow_l1 = 1;
                     else {
                         need_flush |= error;
@@ -1474,7 +1472,7 @@
             l2_pgentry_t *guest2 = guest;
             l2_pgentry_t *snapshot2 = snapshot;
             l1_pgentry_t *shadow2 = shadow;
-            
+
             ASSERT(shadow_mode_write_all(d));
             BUG_ON(!shadow_mode_refcounts(d)); // not yet implemented
 
@@ -1634,7 +1632,7 @@
              !shadow_get_page_from_l1e(npte, d) )
             BUG();
         *ppte = npte;
-        set_guest_back_ptr(d, npte, (entry->writable_pl1e) >> PAGE_SHIFT, 
+        set_guest_back_ptr(d, npte, (entry->writable_pl1e) >> PAGE_SHIFT,
                           (entry->writable_pl1e & ~PAGE_MASK)/sizeof(l1_pgentry_t));
         shadow_put_page_from_l1e(opte, d);
 
@@ -1719,7 +1717,7 @@
 
 static inline int l1pte_read_fault(
     struct domain *d, guest_l1_pgentry_t *gpte_p, l1_pgentry_t *spte_p)
-{ 
+{
     guest_l1_pgentry_t gpte = *gpte_p;
     l1_pgentry_t spte = *spte_p;
     unsigned long pfn = l1e_get_pfn(gpte);
@@ -1761,7 +1759,7 @@
     SH_VVLOG("shadow_fault( va=%lx, code=%lu )",
              va, (unsigned long)regs->error_code);
     perfc_incrc(shadow_fault_calls);
-    
+
     check_pagetable(v, "pre-sf");
 
     /*
@@ -1804,7 +1802,7 @@
     }
 
     /* Write fault? */
-    if ( regs->error_code & 2 )  
+    if ( regs->error_code & 2 )
     {
         int allow_writes = 0;
 
@@ -1818,7 +1816,7 @@
             else
             {
                 /* Write fault on a read-only mapping. */
-                SH_VVLOG("shadow_fault - EXIT: wr fault on RO page (%" PRIpte ")", 
+                SH_VVLOG("shadow_fault - EXIT: wr fault on RO page (%" PRIpte ")",
                          l1e_get_intpte(gpte));
                 perfc_incrc(shadow_fault_bail_ro_mapping);
                 goto fail;
@@ -1878,7 +1876,7 @@
     check_pagetable(v, "post-sf");
     return EXCRET_fault_fixed;
 
- fail:
+fail:
     shadow_unlock(d);
     return 0;
 }
@@ -1895,7 +1893,7 @@
     shadow_lock(d);
 
     //printk("%s(va=%p, val=%p)\n", __func__, (void *)va, (void *)l1e_get_intpte(val));
-        
+
     // This is actually overkill - we don't need to sync the L1 itself,
     // just everything involved in getting to this L1 (i.e. we need
     // linear_pg_table[l1_linear_offset(va)] to be in sync)...
@@ -1925,7 +1923,7 @@
  * and what it uses to get/maintain that mapping.
  *
  * SHADOW MODE:      none         enable         translate         external
- * 
+ *
  * 4KB things:
  * guest_vtable    lin_l2     mapped per gl2   lin_l2 via hl2   mapped per gl2
  * shadow_vtable     n/a         sh_lin_l2       sh_lin_l2      mapped per gl2
@@ -1950,7 +1948,7 @@
 {
     struct domain *d = v->domain;
 #if defined (__x86_64__)
-    unsigned long gmfn = ((v->arch.flags & TF_kernel_mode)? 
+    unsigned long gmfn = ((v->arch.flags & TF_kernel_mode)?
                           pagetable_get_pfn(v->arch.guest_table) :
                           pagetable_get_pfn(v->arch.guest_table_user));
 #else
@@ -2006,7 +2004,7 @@
     /*
      * arch.shadow_vtable
      */
-    if ( max_mode == SHM_external 
+    if ( max_mode == SHM_external
 #if CONFIG_PAGING_LEVELS >=4
          || max_mode & SHM_enable
 #endif
@@ -2241,7 +2239,7 @@
                page_table_page);
         FAIL("RW2 coherence");
     }
- 
+
     if ( eff_guest_mfn == shadow_mfn )
     {
         if ( level > 1 )
@@ -2291,7 +2289,7 @@
         errors += check_pte(v, p_guest+i, p_shadow+i,
                             p_snapshot ? p_snapshot+i : NULL,
                             1, l2_idx, i);
- 
+
     unmap_domain_page(p_shadow);
     unmap_domain_page(p_guest);
     if ( p_snapshot )
@@ -2327,11 +2325,11 @@
 
 #if 0
     if ( memcmp(&spl2e[DOMAIN_ENTRIES_PER_L2_PAGETABLE],
-                &gpl2e[DOMAIN_ENTRIES_PER_L2_PAGETABLE], 
+                &gpl2e[DOMAIN_ENTRIES_PER_L2_PAGETABLE],
                 ((SH_LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT) -
                  DOMAIN_ENTRIES_PER_L2_PAGETABLE) * sizeof(l2_pgentry_t)) )
     {
-        for ( i = DOMAIN_ENTRIES_PER_L2_PAGETABLE; 
+        for ( i = DOMAIN_ENTRIES_PER_L2_PAGETABLE;
               i < (SH_LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT);
               i++ )
             printk("+++ (%d) %lx %lx\n",i,
@@ -2339,7 +2337,7 @@
         FAILPT("hypervisor entries inconsistent");
     }
 
-    if ( (l2_pgentry_val(spl2e[LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT]) != 
+    if ( (l2_pgentry_val(spl2e[LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT]) !=
           l2_pgentry_val(gpl2e[LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT])) )
         FAILPT("hypervisor linear map inconsistent");
 #endif
@@ -2399,7 +2397,7 @@
 {
     struct domain *d = v->domain;
 #if defined (__x86_64__)
-    pagetable_t pt = ((v->arch.flags & TF_kernel_mode)? 
+    pagetable_t pt = ((v->arch.flags & TF_kernel_mode)?
                       pagetable_get_pfn(v->arch.guest_table) :
                       pagetable_get_pfn(v->arch.guest_table_user));
 #else
@@ -2434,7 +2432,7 @@
         oos_pdes = 1;
         ASSERT(ptbase_mfn);
     }
- 
+
     errors += check_l2_table(v, ptbase_mfn, smfn, oos_pdes);
 
     gpl2e = (l2_pgentry_t *) map_domain_page(ptbase_mfn);
@@ -2565,7 +2563,6 @@
  * The code is for 32-bit VMX gues on 64-bit host.
  * To sync guest L2.
  */
-
 static inline void
 validate_bl2e_change(
   struct domain *d,
@@ -2596,7 +2593,6 @@
             entry_from_pfn(sl1mfn + 1, entry_get_flags(sl2_p[sl2_idx]));
     }
     unmap_domain_page(sl2_p);
-
 }
 
 /*
@@ -2629,9 +2625,8 @@
     }
 
     unmap_domain_page(spl4e);
+
     return smfn;
-
-
 }
 
 static unsigned long shadow_l4_table(
@@ -2664,7 +2659,7 @@
          * We could proactively fill in PDEs for pages that are already
          * shadowed *and* where the guest PDE has _PAGE_ACCESSED set
          * (restriction required for coherence of the accessed bit). However,
-         * we tried it and it didn't help performance. This is simpler. 
+         * we tried it and it didn't help performance. This is simpler.
          */
         memset(spl4e, 0, L4_PAGETABLE_ENTRIES*sizeof(l4_pgentry_t));
 
@@ -2757,7 +2752,7 @@
     }
 }
 
-static void shadow_map_into_current(struct vcpu *v, 
+static void shadow_map_into_current(struct vcpu *v,
   unsigned long va, unsigned int from, unsigned int to)
 {
     pgentry_64_t gle, sle;
@@ -2768,7 +2763,7 @@
         return;
     }
 
-    __rw_entry(v, va, &gle, GUEST_ENTRY | GET_ENTRY | to); 
+    __rw_entry(v, va, &gle, GUEST_ENTRY | GET_ENTRY | to);
     ASSERT(entry_get_flags(gle) & _PAGE_PRESENT);
     gpfn = entry_get_pfn(gle);
 
@@ -2784,7 +2779,7 @@
 /*
  * shadow_set_lxe should be put in shadow.h
  */
-static void shadow_set_l2e_64(unsigned long va, l2_pgentry_t sl2e, 
+static void shadow_set_l2e_64(unsigned long va, l2_pgentry_t sl2e,
   int create_l2_shadow, int put_ref_check)
 {
     struct vcpu *v = current;
@@ -2934,11 +2929,11 @@
         sl2e = l2e_empty();
 
     l1_mfn = ___shadow_status(d, start_gpfn | nx, PGT_fl1_shadow);
-    
+
     /* Check the corresponding l2e */
     if (l1_mfn) {
         /* Why it is PRESENT?*/
-        if ((l2e_get_flags(sl2e) & _PAGE_PRESENT) && 
+        if ((l2e_get_flags(sl2e) & _PAGE_PRESENT) &&
                 l2e_get_pfn(sl2e) == l1_mfn) {
            ESH_LOG("sl2e PRSENT bit is set: %lx, l1_mfn = %lx\n", l2e_get_pfn(sl2e), l1_mfn);
         } else {
@@ -2985,7 +2980,7 @@
         sl1e = l1e_from_pfn(mfn, l2e_get_flags(tmp_l2e));
 
         if (!rw) {
-            if ( shadow_mode_log_dirty(d) || 
+            if ( shadow_mode_log_dirty(d) ||
               !(l2e_get_flags(gl2e) & _PAGE_DIRTY) || mfn_is_page_table(mfn) )
             {
                 l1e_remove_flags(sl1e, _PAGE_RW);
@@ -3034,7 +3029,7 @@
  */
 #if defined( GUEST_PGENTRY_32 )
 static inline int guest_page_fault(struct vcpu *v,
-  unsigned long va, unsigned int error_code, 
+  unsigned long va, unsigned int error_code,
   guest_l2_pgentry_t *gpl2e, guest_l1_pgentry_t *gpl1e)
 {
     /* The following check for 32-bit guest on 64-bit host */
@@ -3076,7 +3071,7 @@
 }
 #else
 static inline int guest_page_fault(struct vcpu *v,
-  unsigned long va, unsigned int error_code, 
+  unsigned long va, unsigned int error_code,
   guest_l2_pgentry_t *gpl2e, guest_l1_pgentry_t *gpl1e)
 {
     struct domain *d = v->domain;
@@ -3144,7 +3139,7 @@
 
     perfc_incrc(shadow_fault_calls);
 
-    ESH_LOG("<shadow_fault_64> va=%lx,  rip = %lx, error code = %x\n", 
+    ESH_LOG("<shadow_fault_64> va=%lx,  rip = %lx, error code = %x\n",
             va, regs->eip, regs->error_code);
 
     /*
@@ -3166,12 +3161,12 @@
             v, va, regs->error_code, &gl2e, &gl1e) ) {
         goto fail;
     }
-    
+
     if ( unlikely(!(guest_l2e_get_flags(gl2e) & _PAGE_PSE)) ) {
         /*
          * Handle 4K pages here
          */
-        
+
         /* Write fault? */
         if ( regs->error_code & 2 ) {
             if ( !l1pte_write_fault(v, &gl1e, &sl1e, va) ) {
@@ -3194,7 +3189,7 @@
          */
          if ( unlikely(shadow_mode_log_dirty(d)) )
             __mark_dirty(d, __gpfn_to_mfn(d, l2e_get_pfn(gl2e)));
- 
+
     } else {
         /*
          * Handle 2M pages here
@@ -3262,7 +3257,7 @@
 
     if (guest_page_fault(v, gva, 0, &gl2e, &gl1e))
         return 0;
-    
+
     if (guest_l2e_get_flags(gl2e) & _PAGE_PSE)
        gpa = guest_l2e_get_paddr(gl2e) + (gva & ((1 << GUEST_L2_PAGETABLE_SHIFT) - 1));
     else
diff -r 7f8db234e9db -r 4321438e92a7 xen/arch/x86/vmx_platform.c
--- a/xen/arch/x86/vmx_platform.c       Wed Nov  2 15:42:29 2005
+++ b/xen/arch/x86/vmx_platform.c       Wed Nov  2 15:43:32 2005
@@ -303,20 +303,20 @@
     mmio_inst->flags = 0;
 }
 
-#define GET_OP_SIZE_FOR_BYTE(op_size)   \
-    do {    \
-     if (rex)   \
-     op_size = BYTE_64;  \
- else    \
-     op_size = BYTE;  \
+#define GET_OP_SIZE_FOR_BYTE(op_size)       \
+    do {                                    \
+        if (rex)                            \
+            op_size = BYTE_64;              \
+        else                                \
+            op_size = BYTE;                 \
     } while(0)
 
 #define GET_OP_SIZE_FOR_NONEBYTE(op_size)   \
-    do {    \
-     if (rex & 0x8)   \
-     op_size = QUAD;  \
- else if (op_size != WORD) \
-     op_size = LONG;  \
+    do {                                    \
+        if (rex & 0x8)                      \
+            op_size = QUAD;                 \
+        else if (op_size != WORD)           \
+            op_size = LONG;                 \
     } while(0)
 
 
@@ -398,8 +398,9 @@
 
     case 0x20: /* and r8, m8 */
         instr->instr = INSTR_AND;
-        GET_OP_SIZE_FOR_BYTE(instr->op_size);
-        return reg_mem(instr->op_size, opcode, instr, rex);
+        instr->op_size = BYTE;
+        GET_OP_SIZE_FOR_BYTE(size_reg);
+        return reg_mem(size_reg, opcode, instr, rex);
 
     case 0x21: /* and r32/16, m32/16 */
         instr->instr = INSTR_AND;
@@ -413,8 +414,9 @@
 
     case 0x30: /* xor r8, m8 */
         instr->instr = INSTR_XOR;
-        GET_OP_SIZE_FOR_BYTE(instr->op_size);
-        return reg_mem(instr->op_size, opcode, instr, rex);
+        instr->op_size = BYTE;
+        GET_OP_SIZE_FOR_BYTE(size_reg);
+        return reg_mem(size_reg, opcode, instr, rex);
 
     case 0x31: /* xor r32/16, m32/16 */
         instr->instr = INSTR_XOR;
@@ -592,7 +594,7 @@
         instr->operand[1] = mk_operand(instr->op_size, index, 0, REGISTER);
         return DECODE_success;
 
-    case 0xB7: /* movz m16, r32 */
+    case 0xB7: /* movz m16/m32, r32/r64 */
         instr->instr = INSTR_MOVZ;
         index = get_index(opcode + 1, rex);
         if (rex & 0x8) {
@@ -689,9 +691,9 @@
                           struct mmio_op *mmio_opp, struct cpu_user_regs *regs)
 {
     unsigned long value = 0;
-    int index, size;
-
-    size = operand_size(inst->operand[0]);
+    int index, size_reg;
+
+    size_reg = operand_size(inst->operand[0]);
 
     mmio_opp->flags = inst->flags;
     mmio_opp->instr = inst->instr;
@@ -701,14 +703,17 @@
 
     if (inst->operand[0] & REGISTER) { /* dest is memory */
         index = operand_index(inst->operand[0]);
-        value = get_reg_value(size, index, 0, regs);
+        value = get_reg_value(size_reg, index, 0, regs);
         send_mmio_req(type, gpa, 1, inst->op_size, value, IOREQ_WRITE, 0);
     } else if (inst->operand[0] & IMMEDIATE) { /* dest is memory */
         value = inst->immediate;
         send_mmio_req(type, gpa, 1, inst->op_size, value, IOREQ_WRITE, 0);
     } else if (inst->operand[0] & MEMORY) { /* dest is register */
         /* send the request and wait for the value */
-        send_mmio_req(type, gpa, 1, inst->op_size, 0, IOREQ_READ, 0);
+        if (inst->instr == INSTR_MOVZ)
+            send_mmio_req(type, gpa, 1, size_reg, 0, IOREQ_READ, 0);
+        else
+            send_mmio_req(type, gpa, 1, inst->op_size, 0, IOREQ_READ, 0);
     } else {
         printf("mmio_operands: invalid operand\n");
         domain_crash_synchronous();
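
The vmx_platform.c changes separate two sizes the old decoder conflated: inst->op_size is the width of the memory access, while size_reg is the width (and REX-dependent encoding) of the register operand. For the byte forms of and/xor the memory access is always one byte, but a REX prefix changes which register byte get_reg_value() must fetch; for movz the MMIO read is issued with the narrower source width, since the CPU zero-extends into the wider destination anyway. A comment-only sketch of the convention as read from the diff:

    /* 0x20 (and r8,m8):  op_size = BYTE, size_reg = BYTE or BYTE_64 (REX)
     * 0x0F B7 (movz m16,r32/r64): read size_reg bytes over MMIO, then
     * zero-extend into the op_size-wide destination register. */
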
diff -r 7f8db234e9db -r 4321438e92a7 patches/linux-2.6.12/cpu-hotplug-init.patch
--- /dev/null   Wed Nov  2 15:42:29 2005
+++ b/patches/linux-2.6.12/cpu-hotplug-init.patch       Wed Nov  2 15:43:32 2005
@@ -0,0 +1,34 @@
+diff -ur linux-2.6.12.orig/include/linux/init.h linux-2.6.12/include/linux/init.h
+--- linux-2.6.12.orig/include/linux/init.h     2005-11-01 14:52:28.656025573 +0000
++++ linux-2.6.12/include/linux/init.h  2005-11-01 14:53:28.015791549 +0000
+@@ -229,6 +229,18 @@
+ #define __devexitdata __exitdata
+ #endif
+ 
++#ifdef CONFIG_HOTPLUG_CPU
++#define __cpuinit
++#define __cpuinitdata
++#define __cpuexit
++#define __cpuexitdata
++#else
++#define __cpuinit     __init
++#define __cpuinitdata __initdata
++#define __cpuexit __exit
++#define __cpuexitdata __exitdata
++#endif
++
+ /* Functions marked as __devexit may be discarded at kernel link time, depending
+    on config options.  Newer versions of binutils detect references from
+    retained sections to discarded sections and flag an error.  Pointers to
+diff -ur linux-2.6.12.orig/arch/x86_64/kernel/i387.c linux-2.6.12/arch/x86_64/kernel/i387.c
+--- linux-2.6.12.orig/arch/x86_64/kernel/i387.c        2005-11-01 15:01:58.932991232 +0000
++++ linux-2.6.12/arch/x86_64/kernel/i387.c     2005-11-01 15:02:09.729312416 +0000
+@@ -42,7 +42,7 @@
+  * Called at bootup to set up the initial FPU state that is later cloned
+  * into all processes.
+  */
+-void __init fpu_init(void)
++void __cpuinit fpu_init(void)
+ {
+       unsigned long oldcr0 = read_cr0();
+       extern void __bad_fxsave_alignment(void);
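
The new cpu-hotplug-init.patch backports the __cpuinit family to linux-2.6.12 so the annotations used earlier in this changeset resolve; fpu_init() gets the same treatment because a hot-plugged CPU must initialize its own FPU state after boot. The seemingly inverted logic is deliberate, as this comment-only sketch spells out:

    /* CONFIG_HOTPLUG_CPU=y : __cpuinit expands to nothing, the code stays
     *                        in .text for CPUs that come up later.
     * CONFIG_HOTPLUG_CPU=n : __cpuinit == __init, the code lands in
     *                        .init.text and is reclaimed after boot. */
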
