
[Xen-devel] [PATCH] x86: fix (and simplify) MTRR overlap checking



Obtaining one individual range per variable-range register (via
get_mtrr_range()) was bogus from the beginning, as these registers may
cover multiple disjoint ranges when their masks aren't contiguous. Do
away with that, in favor of simply comparing masked addresses.
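
(Illustration only, not part of the patch: a self-contained sketch of
the masked-address comparison, with made-up base/mask values. An
address matches a variable range register iff
(addr & mask) == (base & mask), so two registers can both match some
address iff their bases agree on every bit set in both masks. The
hypothetical helper below is exactly that test; the real code in the
patch additionally skips registers whose MTRR_PHYSMASK_VALID bit is
clear and operates on frame numbers.)

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper, not Xen code: can two variable MTRRs both
   match some common address? */
static bool mtrrs_overlap(uint64_t base1, uint64_t mask1,
                          uint64_t base2, uint64_t mask2)
{
    return (base1 & mask1 & mask2) == (base2 & mask2 & mask1);
}

int main(void)
{
    /* 16MiB at 0x80000000 vs 8MiB at 0x80800000: the latter lies
       inside the former, so the two overlap (prints 1). */
    printf("%d\n", mtrrs_overlap(0x80000000, ~0xffffffULL,
                                 0x80800000, ~0x7fffffULL));

    /* 16MiB at 0x80000000 vs 16MiB at 0xc0000000: disjoint
       (prints 0). */
    printf("%d\n", mtrrs_overlap(0x80000000, ~0xffffffULL,
                                 0xc0000000, ~0xffffffULL));

    return 0;
}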

Also, for is_var_mtrr_overlapped()'s result to be correct when called
from mtrr_wrmsr(), generic_set_mtrr() must update saved state first.

As minor cleanup changes, constify is_var_mtrr_overlapped()'s parameter
and make mtrr_wrmsr() static.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>

--- a/xen/arch/x86/cpu/mtrr/generic.c
+++ b/xen/arch/x86/cpu/mtrr/generic.c
@@ -220,7 +220,7 @@ void __init mtrr_state_warn(void)
 /* Doesn't attempt to pass an error out to MTRR users
    because it's quite complicated in some cases and probably not
    worth it because the best error handling is to ignore it. */
-void mtrr_wrmsr(unsigned int msr, uint64_t msr_content)
+static void mtrr_wrmsr(unsigned int msr, uint64_t msr_content)
 {
        if (wrmsr_safe(msr, msr_content) < 0)
                printk(KERN_ERR
@@ -495,8 +495,8 @@ static void generic_set_mtrr(unsigned in
        if (size == 0) {
                /* The invalid bit is kept in the mask, so we simply clear the
                   relevant mask register to disable a range. */
+               memset(vr, 0, sizeof(*vr));
                mtrr_wrmsr(MSR_IA32_MTRR_PHYSMASK(reg), 0);
-               memset(vr, 0, sizeof(struct mtrr_var_range));
        } else {
                uint32_t base_lo, base_hi, mask_lo, mask_hi;
 
--- a/xen/arch/x86/cpu/mtrr/mtrr.h
+++ b/xen/arch/x86/cpu/mtrr/mtrr.h
@@ -63,7 +63,6 @@ extern const struct mtrr_ops *mtrr_if;
 extern unsigned int num_var_ranges;
 
 void mtrr_state_warn(void);
-void mtrr_wrmsr(unsigned int msr, uint64_t msr_content);
 
 extern int amd_init_mtrr(void);
 extern int cyrix_init_mtrr(void);
--- a/xen/arch/x86/hvm/mtrr.c
+++ b/xen/arch/x86/hvm/mtrr.c
@@ -26,8 +26,6 @@
 #include <asm/hvm/support.h>
 #include <asm/hvm/cacheattr.h>
 
-static uint32_t size_or_mask;
-
 /* Get page attribute fields (PAn) from PAT MSR. */
 #define pat_cr_2_paf(pat_cr,n)  ((((uint64_t)pat_cr) >> ((n)<<3)) & 0xff)
 
@@ -77,61 +75,28 @@ static uint8_t __read_mostly mtrr_epat_t
 static uint8_t __read_mostly pat_entry_tbl[PAT_TYPE_NUMS] =
     { [0 ... PAT_TYPE_NUMS-1] = INVALID_MEM_TYPE };
 
-static void get_mtrr_range(uint64_t base_msr, uint64_t mask_msr,
-                           uint64_t *base, uint64_t *end)
+bool_t is_var_mtrr_overlapped(const struct mtrr_state *m)
 {
-    uint32_t mask_lo = (uint32_t)mask_msr;
-    uint32_t mask_hi = (uint32_t)(mask_msr >> 32);
-    uint32_t base_lo = (uint32_t)base_msr;
-    uint32_t base_hi = (uint32_t)(base_msr >> 32);
-    uint32_t size;
-
-    if ( !(mask_lo & MTRR_PHYSMASK_VALID) )
-    {
-        /* Invalid (i.e. free) range */
-        *base = 0;
-        *end = 0;
-        return;
-    }
-
-    /* Work out the shifted address mask. */
-    mask_lo = (size_or_mask | (mask_hi << (32 - PAGE_SHIFT)) |
-               (mask_lo >> PAGE_SHIFT));
-
-    /* This works correctly if size is a power of two (a contiguous range). */
-    size = -mask_lo;
-    *base = base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT;
-    *end = *base + size - 1;
-}
-
-bool_t is_var_mtrr_overlapped(struct mtrr_state *m)
-{
-    int32_t seg, i;
-    uint64_t phys_base, phys_mask, phys_base_pre, phys_mask_pre;
-    uint64_t base_pre, end_pre, base, end;
-    uint8_t num_var_ranges = (uint8_t)m->mtrr_cap;
+    unsigned int seg, i;
+    unsigned int num_var_ranges = (uint8_t)m->mtrr_cap;
 
     for ( i = 0; i < num_var_ranges; i++ )
     {
-        phys_base_pre = ((uint64_t*)m->var_ranges)[i*2];
-        phys_mask_pre = ((uint64_t*)m->var_ranges)[i*2 + 1];
+        uint64_t base1 = m->var_ranges[i].base >> PAGE_SHIFT;
+        uint64_t mask1 = m->var_ranges[i].mask >> PAGE_SHIFT;
 
-        get_mtrr_range(phys_base_pre, phys_mask_pre,
-                        &base_pre, &end_pre);
+        if ( !(m->var_ranges[i].mask & MTRR_PHYSMASK_VALID) )
+            continue;
 
         for ( seg = i + 1; seg < num_var_ranges; seg ++ )
         {
-            phys_base = ((uint64_t*)m->var_ranges)[seg*2];
-            phys_mask = ((uint64_t*)m->var_ranges)[seg*2 + 1];
+            uint64_t base2 = m->var_ranges[seg].base >> PAGE_SHIFT;
+            uint64_t mask2 = m->var_ranges[seg].mask >> PAGE_SHIFT;
 
-            get_mtrr_range(phys_base, phys_mask,
-                            &base, &end);
+            if ( !(m->var_ranges[seg].mask & MTRR_PHYSMASK_VALID) )
+                continue;
 
-            if ( ((base_pre != end_pre) && (base != end))
-                 || ((base >= base_pre) && (base <= end_pre))
-                 || ((end >= base_pre) && (end <= end_pre))
-                 || ((base_pre >= base) && (base_pre <= end))
-                 || ((end_pre >= base) && (end_pre <= end)) )
+            if ( (base1 & mask1 & mask2) == (base2 & mask2 & mask1) )
             {
                 /* MTRR is overlapped. */
                 return 1;
@@ -168,8 +133,6 @@ static int __init hvm_mtrr_pat_init(void
         }
     }
 
-    size_or_mask = ~((1 << (paddr_bits - PAGE_SHIFT)) - 1);
-
     return 0;
 }
 __initcall(hvm_mtrr_pat_init);
--- a/xen/include/asm-x86/mtrr.h
+++ b/xen/include/asm-x86/mtrr.h
@@ -91,7 +91,7 @@ extern bool_t mtrr_def_type_msr_set(stru
 extern void memory_type_changed(struct domain *);
 extern bool_t pat_msr_set(uint64_t *pat, uint64_t msr);
 
-bool_t is_var_mtrr_overlapped(struct mtrr_state *m);
+bool_t is_var_mtrr_overlapped(const struct mtrr_state *m);
 bool_t mtrr_pat_not_equal(struct vcpu *vd, struct vcpu *vs);
 
 #endif /* __ASM_X86_MTRR_H__ */

