
[Xen-changelog] [xen-4.1-testing] x86: tighten checks in XEN_DOMCTL_memory_mapping handler


  • To: xen-changelog@xxxxxxxxxxxxxxxxxxx
  • From: Xen patchbot-4.1-testing <patchbot@xxxxxxx>
  • Date: Thu, 04 Oct 2012 16:11:10 +0000
  • Delivery-date: Thu, 04 Oct 2012 16:11:17 +0000
  • List-id: "Change log for Mercurial \(receive only\)" <xen-changelog.lists.xen.org>

# HG changeset patch
# User Jan Beulich <jbeulich@xxxxxxxx>
# Date 1349339895 -7200
# Node ID addf106cc90fb8a82d87964c6e4ffd29863c2bc1
# Parent  ab6d1c5270024005541a2caccad0b518457e510f
x86: tighten checks in XEN_DOMCTL_memory_mapping handler

Properly checking the MFN implies knowing the physical address width
supported by the platform, so to obtain this consistently the
respective code gets moved out of the MTRR subdir.
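
For illustration, the kind of range check this makes possible (a
minimal, stand-alone C sketch; mfn_range_valid() is an illustrative
helper name, not part of the patch, while paddr_bits and PAGE_SHIFT
stand in for the symbols the hypervisor already provides):

#include <stdbool.h>

#define PAGE_SHIFT 12                 /* 4KiB pages on x86 */

static unsigned int paddr_bits = 36;  /* physical address width, from CPUID */

/* Reject a machine frame range that wraps or reaches beyond the
 * platform's addressable physical memory (cf. the domctl.c hunk below). */
static bool mfn_range_valid(unsigned long mfn, unsigned long nr_mfns)
{
    unsigned long last = mfn + nr_mfns - 1;

    if ( last < mfn )                                  /* wrap? */
        return false;
    if ( (mfn | last) >> (paddr_bits - PAGE_SHIFT) )   /* above paddr_bits? */
        return false;
    return true;
}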

Btw., the model-specific workaround in that code is likely unnecessary
- I believe those CPU models don't support 64-bit mode. But I wasn't
able to formally verify this, so I preferred to retain that code for
now.

But the domctl code here was also lacking other error checks, as (on
looking at it again from that angle) was the XEN_DOMCTL_ioport_mapping
one.
Besides adding the missing checks, printing is also added for the case
where revoking access permissions didn't work (as that may have
implications for the host operator, e.g. wanting to not pass through
affected devices to another guest until the one previously using them
did actually die).
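
The error handling added on the mapping side follows a permit / map /
unwind pattern. Roughly, as a stand-alone C sketch (the stub helpers
below - permit_access(), map_one() and friends - are illustrative
stand-ins for iomem_permit_access(), set_mmio_p2m_entry() etc.; the
real code is in the domctl.c hunk below):

#include <stdio.h>

/* Illustrative stubs; return conventions chosen for the sketch only. */
static int permit_access(unsigned long s, unsigned long e) { return 0; }
static int deny_access(unsigned long s, unsigned long e) { return 0; }
static int map_one(unsigned long gfn, unsigned long mfn) { return 1; } /* 1 = ok */
static void unmap_one(unsigned long gfn) { }

int map_range(unsigned long gfn, unsigned long mfn, unsigned long nr)
{
    unsigned long i;
    int ret = permit_access(mfn, mfn + nr - 1);

    if ( !ret )
    {
        for ( i = 0; !ret && i < nr; i++ )
            if ( !map_one(gfn + i, mfn + i) )
                ret = -5;                         /* -EIO */
        if ( ret )
        {
            while ( i-- )                         /* unwind partial mappings */
                unmap_one(gfn + i);
            /* Revoke the just-granted permission; if even that fails,
             * tell the operator, since the range must not be handed to
             * another guest while the mapping may still be in place. */
            if ( deny_access(mfn, mfn + nr - 1) )
                fprintf(stderr, "failed to deny access to [%lx,%lx]\n",
                        mfn, mfn + nr - 1);
        }
    }
    return ret;
}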

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
Acked-by: Keir Fraser <keir@xxxxxxx>
xen-unstable changeset: 25931:149805919569
xen-unstable date: Thu Sep 20 07:21:53 UTC 2012
---


diff -r ab6d1c527002 -r addf106cc90f xen/arch/x86/cpu/centaur.c
--- a/xen/arch/x86/cpu/centaur.c        Thu Oct 04 10:37:19 2012 +0200
+++ b/xen/arch/x86/cpu/centaur.c        Thu Oct 04 10:38:15 2012 +0200
@@ -56,6 +56,9 @@ static void __init init_c3(struct cpuinf
        if (c->x86_model >=6 && c->x86_model <9)
                set_bit(X86_FEATURE_3DNOW, c->x86_capability);
 
+       if (cpuid_eax(0x80000000) < 0x80000008)
+               paddr_bits = 32;
+
        get_model_name(c);
        display_cacheinfo(c);
 }
diff -r ab6d1c527002 -r addf106cc90f xen/arch/x86/cpu/common.c
--- a/xen/arch/x86/cpu/common.c Thu Oct 04 10:37:19 2012 +0200
+++ b/xen/arch/x86/cpu/common.c Thu Oct 04 10:38:15 2012 +0200
@@ -43,6 +43,8 @@ integer_param("cpuid_mask_ext_edx", opt_
 
 struct cpu_dev * cpu_devs[X86_VENDOR_NUM] = {};
 
+unsigned int paddr_bits __read_mostly = 36;
+
 /*
  * Default host IA32_CR_PAT value to cover all memory types.
  * BIOS usually sets it to 0x07040600070406.
@@ -319,6 +321,8 @@ void __cpuinit generic_identify(struct c
                        }
                        if ( xlvl >= 0x80000004 )
                                get_model_name(c); /* Default name */
+                       if ( xlvl >= 0x80000008 )
+                               paddr_bits = cpuid_eax(0x80000008) & 0xff;
                }
 
                /* Intel-defined flags: level 0x00000007 */
diff -r ab6d1c527002 -r addf106cc90f xen/arch/x86/cpu/cyrix.c
--- a/xen/arch/x86/cpu/cyrix.c  Thu Oct 04 10:37:19 2012 +0200
+++ b/xen/arch/x86/cpu/cyrix.c  Thu Oct 04 10:38:15 2012 +0200
@@ -303,7 +303,9 @@ static void __init init_cyrix(struct cpu
        }
        safe_strcpy(c->x86_model_id, Cx86_model[dir0_msn & 7]);
        if (p) safe_strcat(c->x86_model_id, p);
-       return;
+
+       if (cpu_has_cyrix_arr)
+               paddr_bits = 32;
 }
 
 /*
diff -r ab6d1c527002 -r addf106cc90f xen/arch/x86/cpu/intel.c
--- a/xen/arch/x86/cpu/intel.c  Thu Oct 04 10:37:19 2012 +0200
+++ b/xen/arch/x86/cpu/intel.c  Thu Oct 04 10:38:15 2012 +0200
@@ -145,6 +145,11 @@ void __devinit early_intel_workaround(st
                        printk("revised cpuid_level = %d\n", c->cpuid_level);
                }
        }
+
+       /* CPUID workaround for Intel 0F33/0F34 CPU */
+       if (boot_cpu_data.x86 == 0xF && boot_cpu_data.x86_model == 3 &&
+           (boot_cpu_data.x86_mask == 3 || boot_cpu_data.x86_mask == 4))
+               paddr_bits = 36;
 }
 
 /*
diff -r ab6d1c527002 -r addf106cc90f xen/arch/x86/cpu/mtrr/main.c
--- a/xen/arch/x86/cpu/mtrr/main.c      Thu Oct 04 10:37:19 2012 +0200
+++ b/xen/arch/x86/cpu/mtrr/main.c      Thu Oct 04 10:38:15 2012 +0200
@@ -600,8 +600,6 @@ struct mtrr_value {
        unsigned long   lsize;
 };
 
-unsigned int paddr_bits __read_mostly = 36;
-
 /**
  * mtrr_bp_init - initialize mtrrs on the boot CPU
  *
@@ -615,48 +613,12 @@ void __init mtrr_bp_init(void)
 
        if (cpu_has_mtrr) {
                mtrr_if = &generic_mtrr_ops;
-               size_or_mask = 0xff000000;      /* 36 bits */
-               size_and_mask = 0x00f00000;
-
-               /* This is an AMD specific MSR, but we assume(hope?) that
-                  Intel will implement it to when they extend the address
-                  bus of the Xeon. */
-               if (cpuid_eax(0x80000000) >= 0x80000008) {
-                       paddr_bits = cpuid_eax(0x80000008) & 0xff;
-                       /* CPUID workaround for Intel 0F33/0F34 CPU */
-                       if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
-                           boot_cpu_data.x86 == 0xF &&
-                           boot_cpu_data.x86_model == 0x3 &&
-                           (boot_cpu_data.x86_mask == 0x3 ||
-                            boot_cpu_data.x86_mask == 0x4))
-                               paddr_bits = 36;
-
-                       size_or_mask = ~((1ULL << (paddr_bits - PAGE_SHIFT)) - 1);
-                       size_and_mask = ~size_or_mask & 0xfffff00000ULL;
-               } else if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR &&
-                          boot_cpu_data.x86 == 6) {
-                       /* VIA C* family have Intel style MTRRs, but
-                          don't support PAE */
-                       size_or_mask = 0xfff00000;      /* 32 bits */
-                       size_and_mask = 0;
-               }
        } else {
 #ifndef CONFIG_X86_64
                switch (boot_cpu_data.x86_vendor) {
-               case X86_VENDOR_AMD:
-                       if (cpu_has_k6_mtrr) {
-                               /* Pre-Athlon (K6) AMD CPU MTRRs */
-                               mtrr_if = mtrr_ops[X86_VENDOR_AMD];
-                               size_or_mask = 0xfff00000;      /* 32 bits */
-                               size_and_mask = 0;
-                       }
-                       break;
                case X86_VENDOR_CYRIX:
-                       if (cpu_has_cyrix_arr) {
+                       if (cpu_has_cyrix_arr)
                                mtrr_if = mtrr_ops[X86_VENDOR_CYRIX];
-                               size_or_mask = 0xfff00000;      /* 32 bits */
-                               size_and_mask = 0;
-                       }
                        break;
                default:
                        break;
@@ -665,6 +627,8 @@ void __init mtrr_bp_init(void)
        }
 
        if (mtrr_if) {
+               size_or_mask = ~((1ULL << (paddr_bits - PAGE_SHIFT)) - 1);
+               size_and_mask = ~size_or_mask & 0xfffff00000ULL;
                set_num_var_ranges();
                init_table();
                if (use_intel())
diff -r ab6d1c527002 -r addf106cc90f xen/arch/x86/domctl.c
--- a/xen/arch/x86/domctl.c     Thu Oct 04 10:37:19 2012 +0200
+++ b/xen/arch/x86/domctl.c     Thu Oct 04 10:38:15 2012 +0200
@@ -962,10 +962,12 @@ long arch_do_domctl(
         unsigned long gfn = domctl->u.memory_mapping.first_gfn;
         unsigned long mfn = domctl->u.memory_mapping.first_mfn;
         unsigned long nr_mfns = domctl->u.memory_mapping.nr_mfns;
-        int i;
+        unsigned long i;
 
         ret = -EINVAL;
-        if ( (mfn + nr_mfns - 1) < mfn ) /* wrap? */
+        if ( (mfn + nr_mfns - 1) < mfn || /* wrap? */
+             ((mfn | (mfn + nr_mfns - 1)) >> (paddr_bits - PAGE_SHIFT)) ||
+             (gfn + nr_mfns - 1) < gfn ) /* wrap? */
             break;
 
         ret = -EPERM;
@@ -985,18 +987,47 @@ long arch_do_domctl(
                 gfn, mfn, nr_mfns);
 
             ret = iomem_permit_access(d, mfn, mfn + nr_mfns - 1);
-            for ( i = 0; i < nr_mfns; i++ )
-                set_mmio_p2m_entry(p2m_get_hostp2m(d), gfn+i, _mfn(mfn+i));
+            if ( !ret && paging_mode_translate(d) )
+            {
+                struct p2m_domain *p2m = p2m_get_hostp2m(d);
+
+                for ( i = 0; !ret && i < nr_mfns; i++ )
+                    if ( !set_mmio_p2m_entry(p2m, gfn + i, _mfn(mfn + i)) )
+                        ret = -EIO;
+                if ( ret )
+                {
+                    printk(XENLOG_G_WARNING
+                           "memory_map:fail: dom%d gfn=%lx mfn=%lx\n",
+                           d->domain_id, gfn + i, mfn + i);
+                    while ( i-- )
+                        clear_mmio_p2m_entry(p2m, gfn + i);
+                    if ( iomem_deny_access(d, mfn, mfn + nr_mfns - 1) &&
+                         IS_PRIV(current->domain) )
+                        printk(XENLOG_ERR
+                               "memory_map: failed to deny dom%d access to [%lx,%lx]\n",
+                               d->domain_id, mfn, mfn + nr_mfns - 1);
+                }
+            }
         }
         else
         {
+            bool_t acc = 0;
+
             gdprintk(XENLOG_INFO,
                 "memory_map:remove: gfn=%lx mfn=%lx nr_mfns=%lx\n",
                  gfn, mfn, nr_mfns);
 
-            for ( i = 0; i < nr_mfns; i++ )
-                clear_mmio_p2m_entry(p2m_get_hostp2m(d), gfn+i);
+            if ( paging_mode_translate(d) )
+                for ( i = 0; i < nr_mfns; i++ )
+                    acc |= !clear_mmio_p2m_entry(p2m_get_hostp2m(d), gfn + i);
             ret = iomem_deny_access(d, mfn, mfn + nr_mfns - 1);
+            if ( !ret && acc )
+                ret = -EIO;
+            if ( ret && IS_PRIV(current->domain) )
+                printk(XENLOG_ERR
+                       "memory_map: error %ld %s dom%d access to [%lx,%lx]\n",
+                       ret, acc ? "removing" : "denying", d->domain_id,
+                       mfn, mfn + nr_mfns - 1);
         }
 
         rcu_unlock_domain(d);
@@ -1051,12 +1082,23 @@ long arch_do_domctl(
             if ( !found )
             {
                 g2m_ioport = xmalloc(struct g2m_ioport);
+                if ( !g2m_ioport )
+                    ret = -ENOMEM;
+            }
+            if ( !found && !ret )
+            {
                 g2m_ioport->gport = fgp;
                 g2m_ioport->mport = fmp;
                 g2m_ioport->np = np;
                 list_add_tail(&g2m_ioport->list, &hd->g2m_ioport_list);
             }
-            ret = ioports_permit_access(d, fmp, fmp + np - 1);
+            if ( !ret )
+                ret = ioports_permit_access(d, fmp, fmp + np - 1);
+            if ( ret && !found && g2m_ioport )
+            {
+                list_del(&g2m_ioport->list);
+                xfree(g2m_ioport);
+            }
         }
         else
         {
@@ -1071,6 +1113,10 @@ long arch_do_domctl(
                     break;
                 }
             ret = ioports_deny_access(d, fmp, fmp + np - 1);
+            if ( ret && IS_PRIV(current->domain) )
+                printk(XENLOG_ERR
+                       "ioport_map: error %ld denying dom%d access to [%x,%x]\n",
+                       ret, d->domain_id, fmp, fmp + np - 1);
         }
         rcu_unlock_domain(d);
     }

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog