
[Xen-changelog] [xen-unstable] x86, hvm: Lots of MTRR/PAT emulation cleanup.



# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1208349404 -3600
# Node ID cd5dc735bdf3feac543f54d01ab4a1ce9d5a1292
# Parent  06242949ff569930bdb13f627fbb54ea13d8af08
x86, hvm: Lots of MTRR/PAT emulation cleanup.

 - Move MTRR MSR initialisation into hvmloader.
 - Simplify initialisation logic by overlaying UC on default WB rather
   than vice versa (a sketch of this approach follows below).
 - Clean up the hypervisor HVM MTRR/PAT code's interface with the rest
   of the hypervisor.

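The overlay approach reduces to two steps: carve the PCI hole out as a single
UC variable range, then enable MTRRs with a default type of WB. A minimal
sketch of the idea, assuming a wrmsr() helper like hvmloader's (the function
name overlay_uc_on_wb is illustrative, not part of the patch):

    #include <stdint.h>

    #define MSR_MTRRdefType      0x02ff
    #define MSR_MTRRphysBase(r)  (0x200 + 2 * (r))
    #define MSR_MTRRphysMask(r)  (0x200 + 2 * (r) + 1)

    extern void wrmsr(uint32_t msr, uint64_t val);  /* assumed helper */

    static void overlay_uc_on_wb(uint32_t pci_base, unsigned int phys_bits)
    {
        /* Address bits 12..(phys_bits-1) of the PhysMask MSR. */
        uint64_t addr_mask = ((1ull << phys_bits) - 1) & ~0xfffull;

        /* Variable range 0: base = pci_base, low byte 0 = type UC.
         * Sign-extending the 32-bit base yields a mask matching everything
         * from pci_base up to the physical address limit; bit 11 = Valid. */
        wrmsr(MSR_MTRRphysBase(0), pci_base);
        wrmsr(MSR_MTRRphysMask(0),
              ((uint64_t)(int32_t)pci_base & addr_mask) | (1u << 11));

        /* Default type WB (6) with MTRRs enabled (E, bit 11). */
        wrmsr(MSR_MTRRdefType, (1u << 11) | 6);
    }
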
Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
---
 tools/firmware/hvmloader/Makefile     |    5 
 tools/firmware/hvmloader/acpi/build.c |    4 
 tools/firmware/hvmloader/cacheattr.c  |   99 ++++++++++
 tools/firmware/hvmloader/config.h     |    3 
 tools/firmware/hvmloader/hvmloader.c  |    2 
 tools/firmware/hvmloader/smp.c        |    8 
 xen/arch/x86/cpu/mtrr/main.c          |    7 
 xen/arch/x86/hvm/emulate.c            |    4 
 xen/arch/x86/hvm/hvm.c                |  150 +++++++++++++++-
 xen/arch/x86/hvm/mtrr.c               |  312 +++++++---------------------------
 xen/arch/x86/hvm/svm/svm.c            |   36 ---
 xen/arch/x86/hvm/vmx/vmx.c            |  101 -----------
 xen/arch/x86/mm.c                     |    9 
 xen/include/asm-x86/hvm/hvm.h         |    2 
 xen/include/asm-x86/hvm/support.h     |    2 
 xen/include/asm-x86/mtrr.h            |    8 
 16 files changed, 346 insertions(+), 406 deletions(-)

diff -r 06242949ff56 -r cd5dc735bdf3 tools/firmware/hvmloader/Makefile
--- a/tools/firmware/hvmloader/Makefile Wed Apr 16 10:21:08 2008 +0100
+++ b/tools/firmware/hvmloader/Makefile Wed Apr 16 13:36:44 2008 +0100
@@ -28,8 +28,9 @@ LOADADDR = 0x100000
 
 CFLAGS += $(CFLAGS_include) -I.
 
-SRCS = hvmloader.c mp_tables.c util.c smbios.c 32bitbios_support.c smp.c
-OBJS = $(patsubst %.c,%.o,$(SRCS))
+SRCS  = hvmloader.c mp_tables.c util.c smbios.c 
+SRCS += 32bitbios_support.c smp.c cacheattr.c
+OBJS  = $(patsubst %.c,%.o,$(SRCS))
 
 .PHONY: all
 all: hvmloader
diff -r 06242949ff56 -r cd5dc735bdf3 tools/firmware/hvmloader/acpi/build.c
--- a/tools/firmware/hvmloader/acpi/build.c     Wed Apr 16 10:21:08 2008 +0100
+++ b/tools/firmware/hvmloader/acpi/build.c     Wed Apr 16 13:36:44 2008 +0100
@@ -84,8 +84,8 @@ static int construct_bios_info_table(uin
 
     bios_info->hpet_present = hpet_exists(ACPI_HPET_ADDRESS);
 
-    bios_info->pci_min = 0xf0000000;
-    bios_info->pci_len = 0x0c000000;
+    bios_info->pci_min = PCI_MEMBASE;
+    bios_info->pci_len = PCI_MEMSIZE;
 
     return align16(sizeof(*bios_info));
 }
diff -r 06242949ff56 -r cd5dc735bdf3 tools/firmware/hvmloader/cacheattr.c
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/tools/firmware/hvmloader/cacheattr.c      Wed Apr 16 13:36:44 2008 +0100
@@ -0,0 +1,99 @@
+/*
+ * cacheattr.c: MTRR and PAT initialisation.
+ *
+ * Copyright (c) 2008, Citrix Systems, Inc.
+ * 
+ * Authors:
+ *    Keir Fraser <keir.fraser@xxxxxxxxxx>
+ * 
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ */
+
+#include "util.h"
+#include "config.h"
+
+#define MSR_MTRRphysBase(reg) (0x200 + 2 * (reg))
+#define MSR_MTRRphysMask(reg) (0x200 + 2 * (reg) + 1)
+#define MSR_MTRRcap          0x00fe
+#define MSR_MTRRfix64K_00000 0x0250
+#define MSR_MTRRfix16K_80000 0x0258
+#define MSR_MTRRfix16K_A0000 0x0259
+#define MSR_MTRRfix4K_C0000  0x0268
+#define MSR_MTRRfix4K_C8000  0x0269
+#define MSR_MTRRfix4K_D0000  0x026a
+#define MSR_MTRRfix4K_D8000  0x026b
+#define MSR_MTRRfix4K_E0000  0x026c
+#define MSR_MTRRfix4K_E8000  0x026d
+#define MSR_MTRRfix4K_F0000  0x026e
+#define MSR_MTRRfix4K_F8000  0x026f
+#define MSR_PAT              0x0277
+#define MSR_MTRRdefType      0x02ff
+
+void cacheattr_init(void)
+{
+    uint32_t eax, ebx, ecx, edx;
+    uint64_t mtrr_cap, mtrr_def, content, addr_mask;
+    unsigned int i, nr_var_ranges, phys_bits = 36;
+
+    /* Does the CPU support architectural MTRRs? */
+    cpuid(0x00000001, &eax, &ebx, &ecx, &edx);
+    if ( !(edx & (1u << 12)) )
+        return;
+
+    /* Find the physical address size for this CPU. */
+    cpuid(0x80000000, &eax, &ebx, &ecx, &edx);
+    if ( eax >= 0x80000008 )
+    {
+        cpuid(0x80000008, &eax, &ebx, &ecx, &edx);
+        phys_bits = (uint8_t)eax;
+    }
+
+    printf("%u-bit phys ... ", phys_bits);
+
+    addr_mask = ((1ull << phys_bits) - 1) & ~((1ull << 12) - 1);
+    mtrr_cap = rdmsr(MSR_MTRRcap);
+    mtrr_def = (1u << 11) | 6; /* E, default type WB */
+
+    /* Fixed-range MTRRs supported? */
+    if ( mtrr_cap & (1u << 8) )
+    {
+        /* 0x00000-0x9ffff: Write Back (WB) */
+        content = 0x0606060606060606ull;
+        wrmsr(MSR_MTRRfix64K_00000, content);
+        wrmsr(MSR_MTRRfix16K_80000, content);
+        /* 0xa0000-0xbffff: Write Combining (WC) */
+        if ( mtrr_cap & (1u << 10) ) /* WC supported? */
+            content = 0x0101010101010101ull;
+        wrmsr(MSR_MTRRfix16K_A0000, content);
+        /* 0xc0000-0xfffff: Write Back (WB) */
+        content = 0x0606060606060606ull;
+        for ( i = 0; i < 8; i++ )
+            wrmsr(MSR_MTRRfix4K_C0000 + i, content);
+        mtrr_def |= 1u << 10; /* FE */
+        printf("fixed MTRRs ... ");
+    }
+
+    /* Variable-range MTRRs supported? */
+    nr_var_ranges = (uint8_t)mtrr_cap;
+    if ( nr_var_ranges != 0 )
+    {
+        /* A single UC range covering PCI space. */
+        wrmsr(MSR_MTRRphysBase(0), PCI_MEMBASE);
+        wrmsr(MSR_MTRRphysMask(0),
+              ((uint64_t)(int32_t)PCI_MEMBASE & addr_mask) | (1u << 11));
+        printf("var MTRRs ... ");
+    }
+
+    wrmsr(MSR_MTRRdefType, mtrr_def);
+}
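
Each fixed-range MTRR packs eight one-byte memory types, one per sub-range,
which is why 0x0606060606060606 marks a register's whole span as WB (type 6)
and 0x0101010101010101 marks it as WC (type 1). A standalone sketch of the
decoding (dump_fixed_mtrr() is a hypothetical helper, not patch code):

    #include <stdint.h>
    #include <stdio.h>

    static void dump_fixed_mtrr(const char *name, uint64_t content)
    {
        unsigned int i;

        /* Byte i holds the memory type of sub-range i (0=UC, 1=WC, 6=WB). */
        for ( i = 0; i < 8; i++ )
            printf("%s sub-range %u: type %u\n", name, i,
                   (unsigned int)((content >> (i * 8)) & 0xff));
    }

    int main(void)
    {
        dump_fixed_mtrr("MTRRfix64K_00000", 0x0606060606060606ull); /* WB */
        dump_fixed_mtrr("MTRRfix16K_A0000", 0x0101010101010101ull); /* WC */
        return 0;
    }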
diff -r 06242949ff56 -r cd5dc735bdf3 tools/firmware/hvmloader/config.h
--- a/tools/firmware/hvmloader/config.h Wed Apr 16 10:21:08 2008 +0100
+++ b/tools/firmware/hvmloader/config.h Wed Apr 16 13:36:44 2008 +0100
@@ -10,6 +10,9 @@
 
 #define PCI_ISA_DEVFN       0x08    /* dev 1, fn 0 */
 #define PCI_ISA_IRQ_MASK    0x0c20U /* ISA IRQs 5,10,11 are PCI connected */
+
+#define PCI_MEMBASE         0xf0000000
+#define PCI_MEMSIZE         0x0c000000
 
 #define ROMBIOS_SEG            0xF000
 #define ROMBIOS_BEGIN          0x000F0000
diff -r 06242949ff56 -r cd5dc735bdf3 tools/firmware/hvmloader/hvmloader.c
--- a/tools/firmware/hvmloader/hvmloader.c      Wed Apr 16 10:21:08 2008 +0100
+++ b/tools/firmware/hvmloader/hvmloader.c      Wed Apr 16 13:36:44 2008 +0100
@@ -159,7 +159,7 @@ static void pci_setup(void)
     struct resource {
         uint32_t base, max;
     } *resource;
-    struct resource mem_resource = { 0xf0000000, 0xfc000000 };
+    struct resource mem_resource = { PCI_MEMBASE, PCI_MEMBASE + PCI_MEMSIZE };
     struct resource io_resource  = { 0xc000, 0x10000 };
 
     /* Create a list of device BARs in descending order of size. */
diff -r 06242949ff56 -r cd5dc735bdf3 tools/firmware/hvmloader/smp.c
--- a/tools/firmware/hvmloader/smp.c    Wed Apr 16 10:21:08 2008 +0100
+++ b/tools/firmware/hvmloader/smp.c    Wed Apr 16 13:36:44 2008 +0100
@@ -69,10 +69,12 @@ asm (
     "    .text                       \n"
     );
 
+extern void cacheattr_init(void);
+
 /*static*/ void ap_start(void)
 {
     printf(" - CPU%d ... ", ap_cpuid);
-
+    cacheattr_init();
     printf("done.\n");
     wmb();
     ap_callin = 1;
@@ -122,12 +124,10 @@ void smp_initialise(void)
 {
     unsigned int i, nr_cpus = get_vcpu_nr();
 
-    if ( nr_cpus <= 1 )
-        return;
-
     memcpy((void *)AP_BOOT_EIP, ap_boot_start, ap_boot_end - ap_boot_start);
 
     printf("Multiprocessor initialisation:\n");
+    ap_start();
     for ( i = 1; i < nr_cpus; i++ )
         boot_cpu(i);
 }
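
With cacheattr_init() called from ap_start(), and smp_initialise() now running
ap_start() on the boot CPU too, every processor programs an identical MTRR/PAT
setup, as the architecture requires. The handshake the loader relies on looks
roughly like this (a sketch with assumed wmb() and boot glue, not the literal
hvmloader code):

    #include <stdint.h>

    extern void cacheattr_init(void);   /* from cacheattr.c */
    extern void wmb(void);              /* assumed write barrier */

    static volatile int ap_callin;

    /* Entry point each AP (and now the BSP) runs. */
    static void cpu_entry(void)
    {
        cacheattr_init();   /* MTRR/PAT state must match on every CPU */
        wmb();              /* publish stores before raising the flag */
        ap_callin = 1;
    }

    /* The booting CPU spins until the AP reports in. */
    static void wait_for_ap(void)
    {
        while ( !ap_callin )
            ;
    }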
diff -r 06242949ff56 -r cd5dc735bdf3 xen/arch/x86/cpu/mtrr/main.c
--- a/xen/arch/x86/cpu/mtrr/main.c      Wed Apr 16 10:21:08 2008 +0100
+++ b/xen/arch/x86/cpu/mtrr/main.c      Wed Apr 16 13:36:44 2008 +0100
@@ -586,8 +586,6 @@ struct mtrr_value {
        unsigned long   lsize;
 };
 
-extern void global_init_mtrr_pat(void);
-
 /**
  * mtrr_bp_init - initialize mtrrs on the boot CPU
  *
@@ -654,11 +652,8 @@ void __init mtrr_bp_init(void)
        if (mtrr_if) {
                set_num_var_ranges();
                init_table();
-               if (use_intel()) {
+               if (use_intel())
                        get_mtrr_state();
-                       /* initialize some global data for MTRR/PAT virtualization */
-                       global_init_mtrr_pat();
-               }
        }
 }
 
diff -r 06242949ff56 -r cd5dc735bdf3 xen/arch/x86/hvm/emulate.c
--- a/xen/arch/x86/hvm/emulate.c        Wed Apr 16 10:21:08 2008 +0100
+++ b/xen/arch/x86/hvm/emulate.c        Wed Apr 16 13:36:44 2008 +0100
@@ -603,7 +603,7 @@ static int hvmemul_read_msr(
 
     _regs.ecx = (uint32_t)reg;
 
-    if ( (rc = hvm_funcs.msr_read_intercept(&_regs)) != 0 )
+    if ( (rc = hvm_msr_read_intercept(&_regs)) != 0 )
         return rc;
 
 *val = ((uint64_t)(uint32_t)_regs.edx << 32) | (uint32_t)_regs.eax;
@@ -621,7 +621,7 @@ static int hvmemul_write_msr(
     _regs.eax = (uint32_t)val;
     _regs.ecx = (uint32_t)reg;
 
-    return hvm_funcs.msr_write_intercept(&_regs);
+    return hvm_msr_write_intercept(&_regs);
 }
 
 static int hvmemul_wbinvd(
diff -r 06242949ff56 -r cd5dc735bdf3 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c    Wed Apr 16 10:21:08 2008 +0100
+++ b/xen/arch/x86/hvm/hvm.c    Wed Apr 16 13:36:44 2008 +0100
@@ -620,8 +620,6 @@ HVM_REGISTER_SAVE_RESTORE(CPU, hvm_save_
 HVM_REGISTER_SAVE_RESTORE(CPU, hvm_save_cpu_ctxt, hvm_load_cpu_ctxt,
                           1, HVMSR_PER_VCPU);
 
-extern int reset_vmsr(struct mtrr_state *m, u64 *p);
-
 int hvm_vcpu_initialise(struct vcpu *v)
 {
     int rc;
@@ -647,7 +645,7 @@ int hvm_vcpu_initialise(struct vcpu *v)
     spin_lock_init(&v->arch.hvm_vcpu.tm_lock);
     INIT_LIST_HEAD(&v->arch.hvm_vcpu.tm_list);
 
-    rc = reset_vmsr(&v->arch.hvm_vcpu.mtrr, &v->arch.hvm_vcpu.pat_cr);
+    rc = hvm_vcpu_cacheattr_init(v);
     if ( rc != 0 )
         goto fail3;
 
@@ -681,8 +679,7 @@ int hvm_vcpu_initialise(struct vcpu *v)
 
 void hvm_vcpu_destroy(struct vcpu *v)
 {
-    xfree(v->arch.hvm_vcpu.mtrr.var_ranges);
-
+    hvm_vcpu_cacheattr_destroy(v);
     vlapic_destroy(v);
     hvm_funcs.vcpu_destroy(v);
 
@@ -1606,6 +1603,9 @@ void hvm_cpuid(unsigned int input, unsig
         *ebx &= 0x0000FFFFu;
         *ebx |= (current->vcpu_id * 2) << 24;
 
+        /* We always support MTRR MSRs. */
+        *edx |= bitmaskof(X86_FEATURE_MTRR);
+
         *ecx &= (bitmaskof(X86_FEATURE_XMM3) |
                  bitmaskof(X86_FEATURE_SSSE3) |
                  bitmaskof(X86_FEATURE_CX16) |
@@ -1655,6 +1655,146 @@ void hvm_cpuid(unsigned int input, unsig
 #endif
         break;
     }
+}
+
+int hvm_msr_read_intercept(struct cpu_user_regs *regs)
+{
+    uint32_t ecx = regs->ecx;
+    uint64_t msr_content = 0;
+    struct vcpu *v = current;
+    uint64_t *var_range_base, *fixed_range_base;
+    int index;
+
+    var_range_base = (uint64_t *)v->arch.hvm_vcpu.mtrr.var_ranges;
+    fixed_range_base = (uint64_t *)v->arch.hvm_vcpu.mtrr.fixed_ranges;
+
+    switch ( ecx )
+    {
+    case MSR_IA32_TSC:
+        msr_content = hvm_get_guest_time(v);
+        break;
+
+    case MSR_IA32_APICBASE:
+        msr_content = vcpu_vlapic(v)->hw.apic_base_msr;
+        break;
+
+    case MSR_IA32_MCG_CAP:
+    case MSR_IA32_MCG_STATUS:
+    case MSR_IA32_MC0_STATUS:
+    case MSR_IA32_MC1_STATUS:
+    case MSR_IA32_MC2_STATUS:
+    case MSR_IA32_MC3_STATUS:
+    case MSR_IA32_MC4_STATUS:
+    case MSR_IA32_MC5_STATUS:
+        /* No point in letting the guest see real MCEs */
+        msr_content = 0;
+        break;
+
+    case MSR_IA32_CR_PAT:
+        msr_content = v->arch.hvm_vcpu.pat_cr;
+        break;
+
+    case MSR_MTRRcap:
+        msr_content = v->arch.hvm_vcpu.mtrr.mtrr_cap;
+        break;
+    case MSR_MTRRdefType:
+        msr_content = v->arch.hvm_vcpu.mtrr.def_type
+                        | (v->arch.hvm_vcpu.mtrr.enabled << 10);
+        break;
+    case MSR_MTRRfix64K_00000:
+        msr_content = fixed_range_base[0];
+        break;
+    case MSR_MTRRfix16K_80000:
+    case MSR_MTRRfix16K_A0000:
+        index = regs->ecx - MSR_MTRRfix16K_80000;
+        msr_content = fixed_range_base[index + 1];
+        break;
+    case MSR_MTRRfix4K_C0000...MSR_MTRRfix4K_F8000:
+        index = regs->ecx - MSR_MTRRfix4K_C0000;
+        msr_content = fixed_range_base[index + 3];
+        break;
+    case MSR_IA32_MTRR_PHYSBASE0...MSR_IA32_MTRR_PHYSMASK7:
+        index = regs->ecx - MSR_IA32_MTRR_PHYSBASE0;
+        msr_content = var_range_base[index];
+        break;
+
+    default:
+        return hvm_funcs.msr_read_intercept(regs);
+    }
+
+    regs->eax = (uint32_t)msr_content;
+    regs->edx = (uint32_t)(msr_content >> 32);
+    return X86EMUL_OKAY;
+}
+
+int hvm_msr_write_intercept(struct cpu_user_regs *regs)
+{
+    extern bool_t mtrr_var_range_msr_set(
+        struct mtrr_state *v, u32 msr, u64 msr_content);
+    extern bool_t mtrr_fix_range_msr_set(
+        struct mtrr_state *v, int row, u64 msr_content);
+    extern bool_t mtrr_def_type_msr_set(struct mtrr_state *v, u64 msr_content);
+    extern bool_t pat_msr_set(u64 *pat, u64 msr);
+
+    uint32_t ecx = regs->ecx;
+    uint64_t msr_content = (uint32_t)regs->eax | ((uint64_t)regs->edx << 32);
+    struct vcpu *v = current;
+    int index;
+
+    switch ( ecx )
+    {
+    case MSR_IA32_TSC:
+        hvm_set_guest_time(v, msr_content);
+        pt_reset(v);
+        break;
+
+    case MSR_IA32_APICBASE:
+        vlapic_msr_set(vcpu_vlapic(v), msr_content);
+        break;
+
+    case MSR_IA32_CR_PAT:
+        if ( !pat_msr_set(&v->arch.hvm_vcpu.pat_cr, msr_content) )
+            goto gp_fault;
+        break;
+
+    case MSR_MTRRcap:
+        goto gp_fault;
+    case MSR_MTRRdefType:
+        if ( !mtrr_def_type_msr_set(&v->arch.hvm_vcpu.mtrr, msr_content) )
+            goto gp_fault;
+        break;
+    case MSR_MTRRfix64K_00000:
+        if ( !mtrr_fix_range_msr_set(&v->arch.hvm_vcpu.mtrr, 0, msr_content) )
+            goto gp_fault;
+        break;
+    case MSR_MTRRfix16K_80000:
+    case MSR_MTRRfix16K_A0000:
+        index = regs->ecx - MSR_MTRRfix16K_80000 + 1;
+        if ( !mtrr_fix_range_msr_set(&v->arch.hvm_vcpu.mtrr,
+                                     index, msr_content) )
+            goto gp_fault;
+        break;
+    case MSR_MTRRfix4K_C0000...MSR_MTRRfix4K_F8000:
+        index = regs->ecx - MSR_MTRRfix4K_C0000 + 3;
+        if ( !mtrr_fix_range_msr_set(&v->arch.hvm_vcpu.mtrr,
+                                     index, msr_content) )
+            goto gp_fault;
+        break;
+    case MSR_IA32_MTRR_PHYSBASE0...MSR_IA32_MTRR_PHYSMASK7:
+        if ( !mtrr_var_range_msr_set(&v->arch.hvm_vcpu.mtrr,
+                                     regs->ecx, msr_content) )
+            goto gp_fault;
+        break;
+
+    default:
+        return hvm_funcs.msr_write_intercept(regs);
+    }
+
+    return X86EMUL_OKAY;
+
+gp_fault:
+    hvm_inject_exception(TRAP_gp_fault, 0, 0);
+    return X86EMUL_EXCEPTION;
 }
 
 enum hvm_intblk hvm_interrupt_blocked(struct vcpu *v, struct hvm_intack intack)
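
The consolidated intercepts follow the hardware RDMSR/WRMSR convention: the
64-bit MSR value travels split across edx (high half) and eax (low half), and
the fixed-range cases fold the MSR number into an index into fixed_ranges[],
laid out as one 64K entry, two 16K entries, then eight 4K entries. A
self-contained sketch of both conventions (the assertions are illustrative):

    #include <stdint.h>
    #include <assert.h>

    #define MSR_MTRRfix16K_80000 0x0258
    #define MSR_MTRRfix16K_A0000 0x0259
    #define MSR_MTRRfix4K_C0000  0x0268
    #define MSR_MTRRfix4K_F8000  0x026f

    int main(void)
    {
        uint64_t msr_content = 0x0123456789abcdefull;

        /* Split as the read intercept does... */
        uint32_t eax = (uint32_t)msr_content;
        uint32_t edx = (uint32_t)(msr_content >> 32);

        /* ...and recombine as the write intercept does. */
        assert((((uint64_t)edx << 32) | eax) == msr_content);

        /* fixed_ranges[]: index 0 is the 64K MSR, 1-2 the 16K MSRs
         * (offset +1), 3-10 the 4K MSRs (offset +3). */
        assert(MSR_MTRRfix16K_A0000 - MSR_MTRRfix16K_80000 + 1 == 2);
        assert(MSR_MTRRfix4K_F8000 - MSR_MTRRfix4K_C0000 + 3 == 10);
        return 0;
    }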
diff -r 06242949ff56 -r cd5dc735bdf3 xen/arch/x86/hvm/mtrr.c
--- a/xen/arch/x86/hvm/mtrr.c   Wed Apr 16 10:21:08 2008 +0100
+++ b/xen/arch/x86/hvm/mtrr.c   Wed Apr 16 13:36:44 2008 +0100
@@ -27,7 +27,6 @@
 #include <asm/hvm/support.h>
 #include <asm/hvm/cacheattr.h>
 
-/* Xen holds the native MTRR MSRs */
 extern struct mtrr_state mtrr_state;
 
 static uint64_t phys_base_msr_mask;
@@ -35,19 +34,17 @@ static uint32_t size_or_mask;
 static uint32_t size_or_mask;
 static uint32_t size_and_mask;
 
-static void init_pat_entry_tbl(uint64_t pat);
-static void init_mtrr_epat_tbl(void);
-static uint8_t get_mtrr_type(struct mtrr_state *m, paddr_t pa);
-/* get page attribute fields (PAn) from PAT MSR */
+/* Get page attribute fields (PAn) from PAT MSR. */
 #define pat_cr_2_paf(pat_cr,n)  ((((uint64_t)pat_cr) >> ((n)<<3)) & 0xff)
-/* pat entry to PTE flags (PAT, PCD, PWT bits) */
+
+/* PAT entry to PTE flags (PAT, PCD, PWT bits). */
 static uint8_t pat_entry_2_pte_flags[8] = {
     0,           _PAGE_PWT,
     _PAGE_PCD,   _PAGE_PCD | _PAGE_PWT,
     _PAGE_PAT,   _PAGE_PAT | _PAGE_PWT,
     _PAGE_PAT | _PAGE_PCD, _PAGE_PAT | _PAGE_PCD | _PAGE_PWT };
 
-/* effective mm type lookup table, according to MTRR and PAT */
+/* Effective mm type lookup table, according to MTRR and PAT. */
 static uint8_t mm_type_tbl[MTRR_NUM_TYPES][PAT_TYPE_NUMS] = {
 /********PAT(UC,WC,RS,RS,WT,WP,WB,UC-)*/
 /* RS means reserved type(2,3), and type is hardcoded here */
@@ -67,12 +64,13 @@ static uint8_t mm_type_tbl[MTRR_NUM_TYPE
             {0, 1, 2, 2, 4, 5, 6, 0}
 };
 
-/* reverse lookup table, to find a pat type according to MTRR and effective
- * memory type. This table is dynamically generated
+/*
+ * Reverse lookup table, to find a pat type according to MTRR and effective
+ * memory type. This table is dynamically generated.
  */
 static uint8_t mtrr_epat_tbl[MTRR_NUM_TYPES][MEMORY_NUM_TYPES];
 
-/* lookup table for PAT entry of a given PAT value in host pat */
+/* Lookup table for PAT entry of a given PAT value in host PAT. */
 static uint8_t pat_entry_tbl[PAT_TYPE_NUMS];
 
 static void get_mtrr_range(uint64_t base_msr, uint64_t mask_msr,
@@ -139,220 +137,63 @@ bool_t is_var_mtrr_overlapped(struct mtr
     return 0;
 }
 
-/* reserved mtrr for guest OS */
-#define RESERVED_MTRR 2
+#define MTRR_PHYSMASK_VALID_BIT  11
+#define MTRR_PHYSMASK_SHIFT      12
+
+#define MTRR_PHYSBASE_TYPE_MASK  0xff   /* lowest 8 bits */
+#define MTRR_PHYSBASE_SHIFT      12
+#define MTRR_VCNT                8
+
 #define MTRRphysBase_MSR(reg) (0x200 + 2 * (reg))
 #define MTRRphysMask_MSR(reg) (0x200 + 2 * (reg) + 1)
 bool_t mtrr_var_range_msr_set(struct mtrr_state *m, uint32_t msr,
                               uint64_t msr_content);
-bool_t mtrr_def_type_msr_set(struct mtrr_state *m, uint64_t msr_content);
 bool_t mtrr_fix_range_msr_set(struct mtrr_state *m, uint32_t row,
                               uint64_t msr_content);
-static void set_var_mtrr(uint32_t reg, struct mtrr_state *m,
-                         uint32_t base, uint32_t size,
-                         uint32_t type)
-{
-    struct mtrr_var_range *vr;
-
-    vr = &m->var_ranges[reg];
-
-    if ( size == 0 )
-    {
-        /* The invalid bit is kept in the mask, so we simply clear the
-         * relevant mask register to disable a range.
-         */
-        mtrr_var_range_msr_set(m, MTRRphysMask_MSR(reg), 0);
-    }
-    else
-    {
-        vr->base_lo = base << PAGE_SHIFT | type;
-        vr->base_hi = (base & size_and_mask) >> (32 - PAGE_SHIFT);
-        vr->mask_lo = -size << PAGE_SHIFT | 0x800;
-        vr->mask_hi = (-size & size_and_mask) >> (32 - PAGE_SHIFT);
-
-        mtrr_var_range_msr_set(m, MTRRphysBase_MSR(reg), *(uint64_t *)vr);
-        mtrr_var_range_msr_set(m, MTRRphysMask_MSR(reg),
-                               *((uint64_t *)vr + 1));
-    }
-}
-/* From Intel Vol. III Section 10.11.4, the Range Size and Base Alignment have
- * the following requirements:
- * 1. The range size must be 2^N bytes for N >= 12 (i.e. 4KB minimum).
- * 2. The base address must be 2^N aligned, where the N here is equal to
- * the N in the previous requirement. So an 8K range must be 8K aligned,
- * not 4K aligned.
- */
-static uint32_t range_to_mtrr(uint32_t reg, struct mtrr_state *m,
-                              uint32_t range_startk, uint32_t range_sizek,
-                              uint8_t type)
-{
-    if ( !range_sizek || (reg >= ((m->mtrr_cap & 0xff) - RESERVED_MTRR)) )
-    {
-        gdprintk(XENLOG_WARNING,
-                "Failed to init var mtrr msr[%d]"
-                "range_size:%x, total available MSR:%d\n",
-                reg, range_sizek,
-                (uint32_t)((m->mtrr_cap & 0xff) - RESERVED_MTRR));
-        return reg;
-    }
-
-    while ( range_sizek )
-    {
-        uint32_t max_align, align, sizek;
-
-        max_align = (range_startk == 0) ? 32 : ffs(range_startk);
-        align = min_t(uint32_t, fls(range_sizek), max_align);
-        sizek = 1 << (align - 1);
-
-        set_var_mtrr(reg++, m, range_startk, sizek, type);
-
-        range_startk += sizek;
-        range_sizek  -= sizek;
-
-        if ( reg >= ((m->mtrr_cap & 0xff) - RESERVED_MTRR) )
-        {
-            gdprintk(XENLOG_WARNING,
-                    "Failed to init var mtrr msr[%d],"
-                    "total available MSR:%d\n",
-                    reg, (uint32_t)((m->mtrr_cap & 0xff) - RESERVED_MTRR));
-            break;
-        }
-    }
-
-    return reg;
-}
-
-static void setup_fixed_mtrrs(struct vcpu *v)
-{
-    uint64_t content;
-    int32_t i;
-    struct mtrr_state *m = &v->arch.hvm_vcpu.mtrr;
-
-    /* 1. Map (0~A0000) as WB */
-    content = 0x0606060606060606ull;
-    mtrr_fix_range_msr_set(m, 0, content);
-    mtrr_fix_range_msr_set(m, 1, content);
-    /* 2. Map VRAM(A0000~C0000) as WC */
-    content = 0x0101010101010101;
-    mtrr_fix_range_msr_set(m, 2, content);
-    /* 3. Map (C0000~100000) as UC */
-    for ( i = 3; i < 11; i++)
-        mtrr_fix_range_msr_set(m, i, 0);
-}
-
-static void setup_var_mtrrs(struct vcpu *v)
-{
-    p2m_type_t p2m;
-    uint64_t e820_mfn;
-    int8_t *p = NULL;
-    uint8_t nr = 0;
-    int32_t i;
-    uint32_t reg = 0;
-    uint64_t size = 0;
-    uint64_t addr = 0;
-    struct e820entry *e820_table;
-
-    e820_mfn = mfn_x(gfn_to_mfn(v->domain,
-                    HVM_E820_PAGE >> PAGE_SHIFT, &p2m));
-
-    p = (int8_t *)map_domain_page(e820_mfn);
-
-    nr = *(uint8_t*)(p + HVM_E820_NR_OFFSET);
-    e820_table = (struct e820entry*)(p + HVM_E820_OFFSET);
-    /* search E820 table, set MTRR for RAM */
-    for ( i = 0; i < nr; i++)
-    {
-        if ( (e820_table[i].addr >= 0x100000) &&
-             (e820_table[i].type == E820_RAM) )
-        {
-            if ( e820_table[i].addr == 0x100000 )
-            {
-                size = e820_table[i].size + 0x100000 + PAGE_SIZE * 5;
-                addr = 0;
-            }
-            else
-            {
-                /* Larger than 4G */
-                size = e820_table[i].size;
-                addr = e820_table[i].addr;
-            }
-
-            reg = range_to_mtrr(reg, &v->arch.hvm_vcpu.mtrr,
-                                addr >> PAGE_SHIFT, size >> PAGE_SHIFT,
-                                MTRR_TYPE_WRBACK);
-        }
-    }
-}
-
-void init_mtrr_in_hyper(struct vcpu *v)
-{
-    /* TODO:MTRR should be initialized in BIOS or other places.
-     * workaround to do it in here
-     */
-    if ( v->arch.hvm_vcpu.mtrr.is_initialized )
-        return;
-
-    setup_fixed_mtrrs(v);
-    setup_var_mtrrs(v);
-    /* enable mtrr */
-    mtrr_def_type_msr_set(&v->arch.hvm_vcpu.mtrr, 0xc00);
-
-    v->arch.hvm_vcpu.mtrr.is_initialized = 1;
-}
-
-static int32_t reset_mtrr(struct mtrr_state *m)
-{
-    m->var_ranges = xmalloc_array(struct mtrr_var_range, MTRR_VCNT);
-    if ( m->var_ranges == NULL )
-        return -ENOMEM;
-    memset(m->var_ranges, 0, MTRR_VCNT * sizeof(struct mtrr_var_range));
-    memset(m->fixed_ranges, 0, sizeof(m->fixed_ranges));
-    m->enabled = 0;
-    m->def_type = 0;/*mtrr is disabled*/
-    m->mtrr_cap = (0x5<<8)|MTRR_VCNT;/*wc,fix enabled, and vcnt=8*/
-    m->overlapped = 0;
-    return 0;
-}
-
-/* init global variables for MTRR and PAT */
-void global_init_mtrr_pat(void)
+
+static int hvm_mtrr_pat_init(void)
 {
     extern uint64_t host_pat;
-    uint32_t phys_addr;
-
-    init_mtrr_epat_tbl();
-    init_pat_entry_tbl(host_pat);
-    /* Get max physical address, set some global variable */
-    if ( cpuid_eax(0x80000000) < 0x80000008 )
-        phys_addr = 36;
-    else
-        phys_addr = cpuid_eax(0x80000008);
-
-    phys_base_msr_mask = ~((((uint64_t)1) << phys_addr) - 1) | 0xf00UL;
-    phys_mask_msr_mask = ~((((uint64_t)1) << phys_addr) - 1) | 0x7ffUL;
-
-    size_or_mask = ~((1 << (phys_addr - PAGE_SHIFT)) - 1);
-    size_and_mask = ~size_or_mask & 0xfff00000;
-}
-
-static void init_pat_entry_tbl(uint64_t pat)
-{
-    int32_t i, j;
+    unsigned int i, j, phys_addr;
+
+    memset(&mtrr_epat_tbl, INVALID_MEM_TYPE, sizeof(mtrr_epat_tbl));
+    for ( i = 0; i < MTRR_NUM_TYPES; i++ )
+    {
+        for ( j = 0; j < PAT_TYPE_NUMS; j++ )
+        {
+            int32_t tmp = mm_type_tbl[i][j];
+            if ( (tmp >= 0) && (tmp < MEMORY_NUM_TYPES) )
+                mtrr_epat_tbl[i][tmp] = j;
+        }
+    }
 
     memset(&pat_entry_tbl, INVALID_MEM_TYPE,
            PAT_TYPE_NUMS * sizeof(pat_entry_tbl[0]));
-
     for ( i = 0; i < PAT_TYPE_NUMS; i++ )
     {
         for ( j = 0; j < PAT_TYPE_NUMS; j++ )
         {
-            if ( pat_cr_2_paf(pat, j) == i )
+            if ( pat_cr_2_paf(host_pat, j) == i )
             {
                 pat_entry_tbl[i] = j;
                 break;
             }
         }
     }
-}
+
+    phys_addr = 36;
+    if ( cpuid_eax(0x80000000) >= 0x80000008 )
+        phys_addr = (uint8_t)cpuid_eax(0x80000008);
+
+    phys_base_msr_mask = ~((((uint64_t)1) << phys_addr) - 1) | 0xf00UL;
+    phys_mask_msr_mask = ~((((uint64_t)1) << phys_addr) - 1) | 0x7ffUL;
+
+    size_or_mask = ~((1 << (phys_addr - PAGE_SHIFT)) - 1);
+    size_and_mask = ~size_or_mask & 0xfff00000;
+
+    return 0;
+}
+__initcall(hvm_mtrr_pat_init);
 
 uint8_t pat_type_2_pte_flags(uint8_t pat_type)
 {
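
The masks computed above bound which address bits a guest may set in the
variable-range MSRs. The underlying matching rule, per the Intel SDM, is that
a physical address hits a range when (addr & mask) == (base & mask), with bit
11 of the PhysMask MSR acting as the Valid bit. A small illustration
(var_mtrr_match() is a hypothetical helper, not patch code):

    #include <stdint.h>
    #include <stdio.h>

    #define MTRR_PHYSMASK_VALID_BIT 11

    static int var_mtrr_match(uint64_t base_msr, uint64_t mask_msr,
                              uint64_t addr)
    {
        uint64_t mask = mask_msr & ~0xfffull;   /* address bits only */

        if ( !(mask_msr & (1ull << MTRR_PHYSMASK_VALID_BIT)) )
            return 0;                           /* range disabled */
        return (addr & mask) == (base_msr & ~0xfffull & mask);
    }

    int main(void)
    {
        uint64_t base = 0xf0000000ull;                /* low byte 0: UC */
        uint64_t mask = 0xff0000000ull | (1u << 11);  /* 36-bit CPU, Valid */

        printf("inside: %d, outside: %d\n",
               var_mtrr_match(base, mask, 0xf8000000ull),   /* 1 */
               var_mtrr_match(base, mask, 0x80000000ull));  /* 0 */
        return 0;
    }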
@@ -368,24 +209,35 @@ uint8_t pat_type_2_pte_flags(uint8_t pat
     return pat_entry_2_pte_flags[pat_entry_tbl[PAT_TYPE_UNCACHABLE]];
 }
 
-int32_t reset_vmsr(struct mtrr_state *m, uint64_t *pat_ptr)
-{
-    int32_t rc;
-
-    rc = reset_mtrr(m);
-    if ( rc != 0 )
-        return rc;
-
-    *pat_ptr = ((uint64_t)PAT_TYPE_WRBACK) |               /* PAT0: WB */
-               ((uint64_t)PAT_TYPE_WRTHROUGH << 8) |       /* PAT1: WT */
-               ((uint64_t)PAT_TYPE_UC_MINUS << 16) |       /* PAT2: UC- */
-               ((uint64_t)PAT_TYPE_UNCACHABLE << 24) |     /* PAT3: UC */
-               ((uint64_t)PAT_TYPE_WRBACK << 32) |         /* PAT4: WB */
-               ((uint64_t)PAT_TYPE_WRTHROUGH << 40) |      /* PAT5: WT */
-               ((uint64_t)PAT_TYPE_UC_MINUS << 48) |       /* PAT6: UC- */
-               ((uint64_t)PAT_TYPE_UNCACHABLE << 56);      /* PAT7: UC */
-
-    return 0;
+int hvm_vcpu_cacheattr_init(struct vcpu *v)
+{
+    struct mtrr_state *m = &v->arch.hvm_vcpu.mtrr;
+
+    memset(m, 0, sizeof(*m));
+
+    m->var_ranges = xmalloc_array(struct mtrr_var_range, MTRR_VCNT);
+    if ( m->var_ranges == NULL )
+        return -ENOMEM;
+    memset(m->var_ranges, 0, MTRR_VCNT * sizeof(struct mtrr_var_range));
+
+    m->mtrr_cap = (1u << 10) | (1u << 8) | MTRR_VCNT;
+
+    v->arch.hvm_vcpu.pat_cr =
+        ((uint64_t)PAT_TYPE_WRBACK) |               /* PAT0: WB */
+        ((uint64_t)PAT_TYPE_WRTHROUGH << 8) |       /* PAT1: WT */
+        ((uint64_t)PAT_TYPE_UC_MINUS << 16) |       /* PAT2: UC- */
+        ((uint64_t)PAT_TYPE_UNCACHABLE << 24) |     /* PAT3: UC */
+        ((uint64_t)PAT_TYPE_WRBACK << 32) |         /* PAT4: WB */
+        ((uint64_t)PAT_TYPE_WRTHROUGH << 40) |      /* PAT5: WT */
+        ((uint64_t)PAT_TYPE_UC_MINUS << 48) |       /* PAT6: UC- */
+        ((uint64_t)PAT_TYPE_UNCACHABLE << 56);      /* PAT7: UC */
+
+    return 0;
+}
+
+void hvm_vcpu_cacheattr_destroy(struct vcpu *v)
+{
+    xfree(v->arch.hvm_vcpu.mtrr.var_ranges);
 }
 
 /*
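
The guest PAT initialised here mirrors the power-on default layout: eight
one-byte entries PA0-PA7, selected per page by the PAT, PCD and PWT PTE bits.
The pat_cr_2_paf() macro earlier in this file recovers entry n by shifting by
n*8; decoded, the default reads WB, WT, UC-, UC repeated across both halves.
A standalone illustration reusing the patch's type values:

    #include <stdint.h>
    #include <stdio.h>

    #define PAT_TYPE_UNCACHABLE 0
    #define PAT_TYPE_WRTHROUGH  4
    #define PAT_TYPE_WRBACK     6
    #define PAT_TYPE_UC_MINUS   7

    #define pat_cr_2_paf(pat, n) ((((uint64_t)(pat)) >> ((n) << 3)) & 0xff)

    int main(void)
    {
        uint64_t pat =
            ((uint64_t)PAT_TYPE_WRBACK)           |     /* PAT0: WB  */
            ((uint64_t)PAT_TYPE_WRTHROUGH  <<  8) |     /* PAT1: WT  */
            ((uint64_t)PAT_TYPE_UC_MINUS   << 16) |     /* PAT2: UC- */
            ((uint64_t)PAT_TYPE_UNCACHABLE << 24) |     /* PAT3: UC  */
            ((uint64_t)PAT_TYPE_WRBACK     << 32) |     /* PAT4: WB  */
            ((uint64_t)PAT_TYPE_WRTHROUGH  << 40) |     /* PAT5: WT  */
            ((uint64_t)PAT_TYPE_UC_MINUS   << 48) |     /* PAT6: UC- */
            ((uint64_t)PAT_TYPE_UNCACHABLE << 56);      /* PAT7: UC  */
        unsigned int n;

        for ( n = 0; n < 8; n++ )
            printf("PAT%u = type %u\n", n,
                   (unsigned int)pat_cr_2_paf(pat, n));
        return 0;
    }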
@@ -512,23 +364,6 @@ static uint8_t effective_mm_type(struct 
     return effective;
 }
 
-static void init_mtrr_epat_tbl(void)
-{
-    int32_t i, j;
-    /* set default value to an invalid type, just for checking conflict */
-    memset(&mtrr_epat_tbl, INVALID_MEM_TYPE, sizeof(mtrr_epat_tbl));
-
-    for ( i = 0; i < MTRR_NUM_TYPES; i++ )
-    {
-        for ( j = 0; j < PAT_TYPE_NUMS; j++ )
-        {
-            int32_t tmp = mm_type_tbl[i][j];
-            if ( (tmp >= 0) && (tmp < MEMORY_NUM_TYPES) )
-                mtrr_epat_tbl[i][tmp] = j;
-        }
-    }
-}
-
 uint32_t get_pat_flags(struct vcpu *v,
                        uint32_t gl1e_flags,
                        paddr_t gpaddr,
@@ -856,7 +691,6 @@ static int hvm_load_mtrr_msr(struct doma
 
     mtrr_def_type_msr_set(mtrr_state, hw_mtrr.msr_mtrr_def_type);
 
-    v->arch.hvm_vcpu.mtrr.is_initialized = 1;
     return 0;
 }
 
diff -r 06242949ff56 -r cd5dc735bdf3 xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c        Wed Apr 16 10:21:08 2008 +0100
+++ b/xen/arch/x86/hvm/svm/svm.c        Wed Apr 16 13:36:44 2008 +0100
@@ -911,6 +911,9 @@ static void svm_cpuid_intercept(
             __clear_bit(X86_FEATURE_PAE & 31, edx);
         __clear_bit(X86_FEATURE_PSE36 & 31, edx);
 
+        /* We always support MTRR MSRs. */
+        *edx |= bitmaskof(X86_FEATURE_MTRR);
+
         /* Filter all other features according to a whitelist. */
         *ecx &= (bitmaskof(X86_FEATURE_LAHF_LM) |
                  bitmaskof(X86_FEATURE_ALTMOVCR) |
@@ -981,14 +984,6 @@ static int svm_msr_read_intercept(struct
 
     switch ( ecx )
     {
-    case MSR_IA32_TSC:
-        msr_content = hvm_get_guest_time(v);
-        break;
-
-    case MSR_IA32_APICBASE:
-        msr_content = vcpu_vlapic(v)->hw.apic_base_msr;
-        break;
-
     case MSR_EFER:
         msr_content = v->arch.hvm_vcpu.guest_efer;
         break;
@@ -1013,18 +1008,6 @@ static int svm_msr_read_intercept(struct
 
     case MSR_K8_VM_HSAVE_PA:
         goto gpf;
-
-    case MSR_IA32_MCG_CAP:
-    case MSR_IA32_MCG_STATUS:
-    case MSR_IA32_MC0_STATUS:
-    case MSR_IA32_MC1_STATUS:
-    case MSR_IA32_MC2_STATUS:
-    case MSR_IA32_MC3_STATUS:
-    case MSR_IA32_MC4_STATUS:
-    case MSR_IA32_MC5_STATUS:
-        /* No point in letting the guest see real MCEs */
-        msr_content = 0;
-        break;
 
     case MSR_IA32_DEBUGCTLMSR:
         msr_content = vmcb->debugctlmsr;
@@ -1083,15 +1066,6 @@ static int svm_msr_write_intercept(struc
 
     switch ( ecx )
     {
-    case MSR_IA32_TSC:
-        hvm_set_guest_time(v, msr_content);
-        pt_reset(v);
-        break;
-
-    case MSR_IA32_APICBASE:
-        vlapic_msr_set(vcpu_vlapic(v), msr_content);
-        break;
-
     case MSR_K8_VM_HSAVE_PA:
         goto gpf;
 
@@ -1152,12 +1126,12 @@ static void svm_do_msr_access(struct cpu
 
     if ( vmcb->exitinfo1 == 0 )
     {
-        rc = svm_msr_read_intercept(regs);
+        rc = hvm_msr_read_intercept(regs);
         inst_len = __get_instruction_length(v, INSTR_RDMSR, NULL);
     }
     else
     {
-        rc = svm_msr_write_intercept(regs);
+        rc = hvm_msr_write_intercept(regs);
         inst_len = __get_instruction_length(v, INSTR_WRMSR, NULL);
     }
 
diff -r 06242949ff56 -r cd5dc735bdf3 xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Wed Apr 16 10:21:08 2008 +0100
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Wed Apr 16 13:36:44 2008 +0100
@@ -1622,17 +1622,11 @@ static int vmx_msr_read_intercept(struct
     u64 msr_content = 0;
     u32 ecx = regs->ecx, eax, edx;
     struct vcpu *v = current;
-    int index;
-    u64 *var_range_base = (u64*)v->arch.hvm_vcpu.mtrr.var_ranges;
-    u64 *fixed_range_base =  (u64*)v->arch.hvm_vcpu.mtrr.fixed_ranges;
 
     HVM_DBG_LOG(DBG_LEVEL_1, "ecx=%x", ecx);
 
     switch ( ecx )
     {
-    case MSR_IA32_TSC:
-        msr_content = hvm_get_guest_time(v);
-        break;
     case MSR_IA32_SYSENTER_CS:
         msr_content = (u32)__vmread(GUEST_SYSENTER_CS);
         break;
@@ -1641,35 +1635,6 @@ static int vmx_msr_read_intercept(struct
         break;
     case MSR_IA32_SYSENTER_EIP:
         msr_content = __vmread(GUEST_SYSENTER_EIP);
-        break;
-    case MSR_IA32_APICBASE:
-        msr_content = vcpu_vlapic(v)->hw.apic_base_msr;
-        break;
-    case MSR_IA32_CR_PAT:
-        msr_content = v->arch.hvm_vcpu.pat_cr;
-        break;
-    case MSR_MTRRcap:
-        msr_content = v->arch.hvm_vcpu.mtrr.mtrr_cap;
-        break;
-    case MSR_MTRRdefType:
-        msr_content = v->arch.hvm_vcpu.mtrr.def_type
-                        | (v->arch.hvm_vcpu.mtrr.enabled << 10);
-        break;
-    case MSR_MTRRfix64K_00000:
-        msr_content = fixed_range_base[0];
-        break;
-    case MSR_MTRRfix16K_80000:
-    case MSR_MTRRfix16K_A0000:
-        index = regs->ecx - MSR_MTRRfix16K_80000;
-        msr_content = fixed_range_base[index + 1];
-        break;
-    case MSR_MTRRfix4K_C0000...MSR_MTRRfix4K_F8000:
-        index = regs->ecx - MSR_MTRRfix4K_C0000;
-        msr_content = fixed_range_base[index + 3];
-        break;
-    case MSR_IA32_MTRR_PHYSBASE0...MSR_IA32_MTRR_PHYSMASK7:
-        index = regs->ecx - MSR_IA32_MTRR_PHYSBASE0;
-        msr_content = var_range_base[index];
         break;
     case MSR_IA32_DEBUGCTLMSR:
         msr_content = __vmread(GUEST_IA32_DEBUGCTL);
@@ -1679,17 +1644,6 @@ static int vmx_msr_read_intercept(struct
         break;
     case MSR_IA32_VMX_BASIC...MSR_IA32_VMX_PROCBASED_CTLS2:
         goto gp_fault;
-    case MSR_IA32_MCG_CAP:
-    case MSR_IA32_MCG_STATUS:
-    case MSR_IA32_MC0_STATUS:
-    case MSR_IA32_MC1_STATUS:
-    case MSR_IA32_MC2_STATUS:
-    case MSR_IA32_MC3_STATUS:
-    case MSR_IA32_MC4_STATUS:
-    case MSR_IA32_MC5_STATUS:
-        /* No point in letting the guest see real MCEs */
-        msr_content = 0;
-        break;
     case MSR_IA32_MISC_ENABLE:
         rdmsrl(MSR_IA32_MISC_ENABLE, msr_content);
         /* Debug Trace Store is not supported. */
@@ -1729,8 +1683,8 @@ static int vmx_msr_read_intercept(struct
         goto gp_fault;
     }
 
-    regs->eax = msr_content & 0xFFFFFFFF;
-    regs->edx = msr_content >> 32;
+    regs->eax = (uint32_t)msr_content;
+    regs->edx = (uint32_t)(msr_content >> 32);
 
 done:
     hvmtrace_msr_read(v, ecx, msr_content);
@@ -1833,19 +1787,11 @@ void vmx_vlapic_msr_changed(struct vcpu 
     vmx_vmcs_exit(v);
 }
 
-extern bool_t mtrr_var_range_msr_set(struct mtrr_state *v,
-        u32 msr, u64 msr_content);
-extern bool_t mtrr_fix_range_msr_set(struct mtrr_state *v,
-        int row, u64 msr_content);
-extern bool_t mtrr_def_type_msr_set(struct mtrr_state *v, u64 msr_content);
-extern bool_t pat_msr_set(u64 *pat, u64 msr);
-
 static int vmx_msr_write_intercept(struct cpu_user_regs *regs)
 {
     u32 ecx = regs->ecx;
     u64 msr_content;
     struct vcpu *v = current;
-    int index;
 
     HVM_DBG_LOG(DBG_LEVEL_1, "ecx=%x, eax=%x, edx=%x",
                 ecx, (u32)regs->eax, (u32)regs->edx);
@@ -1856,10 +1802,6 @@ static int vmx_msr_write_intercept(struc
 
     switch ( ecx )
     {
-    case MSR_IA32_TSC:
-        hvm_set_guest_time(v, msr_content);
-        pt_reset(v);
-        break;
     case MSR_IA32_SYSENTER_CS:
         __vmwrite(GUEST_SYSENTER_CS, msr_content);
         break;
@@ -1869,41 +1811,6 @@ static int vmx_msr_write_intercept(struc
     case MSR_IA32_SYSENTER_EIP:
         __vmwrite(GUEST_SYSENTER_EIP, msr_content);
         break;
-    case MSR_IA32_APICBASE:
-        vlapic_msr_set(vcpu_vlapic(v), msr_content);
-        break;
-    case MSR_IA32_CR_PAT:
-        if ( !pat_msr_set(&v->arch.hvm_vcpu.pat_cr, msr_content) )
-           goto gp_fault;
-        break;
-    case MSR_MTRRdefType:
-        if ( !mtrr_def_type_msr_set(&v->arch.hvm_vcpu.mtrr, msr_content) )
-           goto gp_fault;
-        break;
-    case MSR_MTRRfix64K_00000:
-        if ( !mtrr_fix_range_msr_set(&v->arch.hvm_vcpu.mtrr, 0, msr_content) )
-            goto gp_fault;
-        break;
-    case MSR_MTRRfix16K_80000:
-    case MSR_MTRRfix16K_A0000:
-        index = regs->ecx - MSR_MTRRfix16K_80000 + 1;
-        if ( !mtrr_fix_range_msr_set(&v->arch.hvm_vcpu.mtrr,
-                                     index, msr_content) )
-            goto gp_fault;
-        break;
-    case MSR_MTRRfix4K_C0000...MSR_MTRRfix4K_F8000:
-        index = regs->ecx - MSR_MTRRfix4K_C0000 + 3;
-        if ( !mtrr_fix_range_msr_set(&v->arch.hvm_vcpu.mtrr,
-                                     index, msr_content) )
-            goto gp_fault;
-        break;
-    case MSR_IA32_MTRR_PHYSBASE0...MSR_IA32_MTRR_PHYSMASK7:
-        if ( !mtrr_var_range_msr_set(&v->arch.hvm_vcpu.mtrr,
-                                     regs->ecx, msr_content) )
-            goto gp_fault;
-        break;
-    case MSR_MTRRcap:
-        goto gp_fault;
     case MSR_IA32_DEBUGCTLMSR: {
         int i, rc = 0;
 
@@ -2330,12 +2237,12 @@ asmlinkage void vmx_vmexit_handler(struc
         break;
     case EXIT_REASON_MSR_READ:
         inst_len = __get_instruction_length(); /* Safe: RDMSR */
-        if ( vmx_msr_read_intercept(regs) == X86EMUL_OKAY )
+        if ( hvm_msr_read_intercept(regs) == X86EMUL_OKAY )
             __update_guest_eip(inst_len);
         break;
     case EXIT_REASON_MSR_WRITE:
         inst_len = __get_instruction_length(); /* Safe: WRMSR */
-        if ( vmx_msr_write_intercept(regs) == X86EMUL_OKAY )
+        if ( hvm_msr_write_intercept(regs) == X86EMUL_OKAY )
             __update_guest_eip(inst_len);
         break;
 
diff -r 06242949ff56 -r cd5dc735bdf3 xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Wed Apr 16 10:21:08 2008 +0100
+++ b/xen/arch/x86/mm.c Wed Apr 16 13:36:44 2008 +0100
@@ -3279,15 +3279,6 @@ long arch_memory_op(int op, XEN_GUEST_HA
         case XENMAPSPACE_shared_info:
             if ( xatp.idx == 0 )
                 mfn = virt_to_mfn(d->shared_info);
-            /* XXX: assumption here, this is called after E820 table is build
-             * need the E820 to initialize MTRR.
-             */
-            if ( is_hvm_domain(d) ) {
-                extern void init_mtrr_in_hyper(struct vcpu *);
-                struct vcpu *vs;
-                for_each_vcpu(d, vs)
-                    init_mtrr_in_hyper(vs);
-            }
             break;
         case XENMAPSPACE_grant_table:
             spin_lock(&d->grant_table->lock);
diff -r 06242949ff56 -r cd5dc735bdf3 xen/include/asm-x86/hvm/hvm.h
--- a/xen/include/asm-x86/hvm/hvm.h     Wed Apr 16 10:21:08 2008 +0100
+++ b/xen/include/asm-x86/hvm/hvm.h     Wed Apr 16 13:36:44 2008 +0100
@@ -139,6 +139,8 @@ int hvm_vcpu_initialise(struct vcpu *v);
 int hvm_vcpu_initialise(struct vcpu *v);
 void hvm_vcpu_destroy(struct vcpu *v);
 void hvm_vcpu_down(struct vcpu *v);
+int hvm_vcpu_cacheattr_init(struct vcpu *v);
+void hvm_vcpu_cacheattr_destroy(struct vcpu *v);
 
 void hvm_send_assist_req(struct vcpu *v);
 
diff -r 06242949ff56 -r cd5dc735bdf3 xen/include/asm-x86/hvm/support.h
--- a/xen/include/asm-x86/hvm/support.h Wed Apr 16 10:21:08 2008 +0100
+++ b/xen/include/asm-x86/hvm/support.h Wed Apr 16 13:36:44 2008 +0100
@@ -130,5 +130,7 @@ int hvm_set_cr0(unsigned long value);
 int hvm_set_cr0(unsigned long value);
 int hvm_set_cr3(unsigned long value);
 int hvm_set_cr4(unsigned long value);
+int hvm_msr_read_intercept(struct cpu_user_regs *regs);
+int hvm_msr_write_intercept(struct cpu_user_regs *regs);
 
 #endif /* __ASM_X86_HVM_SUPPORT_H__ */
diff -r 06242949ff56 -r cd5dc735bdf3 xen/include/asm-x86/mtrr.h
--- a/xen/include/asm-x86/mtrr.h        Wed Apr 16 10:21:08 2008 +0100
+++ b/xen/include/asm-x86/mtrr.h        Wed Apr 16 13:36:44 2008 +0100
@@ -11,13 +11,6 @@
 #define MTRR_TYPE_WRBACK     6
 #define MTRR_NUM_TYPES       7
 #define MEMORY_NUM_TYPES     MTRR_NUM_TYPES
-
-#define MTRR_PHYSMASK_VALID_BIT  11
-#define MTRR_PHYSMASK_SHIFT      12
-
-#define MTRR_PHYSBASE_TYPE_MASK  0xff   /* lowest 8 bits */
-#define MTRR_PHYSBASE_SHIFT      12
-#define MTRR_VCNT            8
 
 #define NORMAL_CACHE_MODE          0
 #define NO_FILL_CACHE_MODE         2
@@ -58,7 +51,6 @@ struct mtrr_state {
        u64       mtrr_cap;
        /* ranges in var MSRs are overlapped or not:0(no overlapped) */
        bool_t    overlapped;
-       bool_t    is_initialized;
 };
 
 extern void mtrr_save_fixed_ranges(void *);
