
[Xen-changelog] [xen staging] xen/sched: Remove d->is_pinned



commit 905d7340f6d0abfbf986cede6e535c51ab1de3c3
Author:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
AuthorDate: Mon Apr 1 10:08:43 2019 +0000
Commit:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CommitDate: Mon Apr 1 16:42:59 2019 +0100

    xen/sched: Remove d->is_pinned
    
    The is_pinned field is rather odd.  It can only be activated with the
    "dom0_vcpus_pin" command line option, and causes dom0 (or the late hwdom)
    to have its vcpus identity-pinned to pcpus.
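    
    For context, "dom0_vcpus_pin" is a boolean option on the Xen command
    line, so this pinning is requested at boot time.  An illustrative GRUB2
    entry (the file paths and the other options here are placeholders, not
    part of this change):
    
        multiboot2 /boot/xen.gz dom0_vcpus_pin dom0_mem=4G
        module2 /boot/vmlinuz root=/dev/xvda1 ro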
    
    Having dom0_vcpus_pin active disallows the use of vcpu_set_hard_affinity().
    However, when a pcpu is offlined, or moved between cpupools, the affinity is
    broken and reverts to cpumask_all.  This results in vcpus which are no
    longer pinned, and cannot be adjusted.
    
    A related bit of functionality is the is_pinned_vcpu() predicate.  This is
    only used by x86 code, and permits the use of VCPUOP_get_physid and
    writeable access to some extra MSRs.
    
    The implementation, however, returns true for is_pinned (which will include
    unpinned vcpus from the above scenario), *or* if the hard affinity mask only
    has a single bit set (which is redundant with the intended effect of
    is_pinned, but also includes other domains).
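    
    To make the difference concrete, here is a minimal standalone sketch of
    the old and new predicates.  The structures are simplified stand-ins for
    Xen's real struct domain / struct vcpu, not the hypervisor's actual
    types (which use a domid check and a cpumask rather than these fields):
    
        #include <stdbool.h>
        #include <stdio.h>
    
        struct domain {
            bool is_pinned;  /* old flag, set only by "dom0_vcpus_pin" */
            bool is_hwdom;   /* stand-in for is_hardware_domain(d) */
        };
    
        struct vcpu {
            struct domain *domain;
            unsigned int hard_weight;  /* cpumask_weight(cpu_hard_affinity) */
        };
    
        /* Old semantics: fires for *any* vcpu whose hard affinity has a
         * single bit set, regardless of which domain it belongs to. */
        static bool old_is_pinned_vcpu(const struct vcpu *v)
        {
            return v->domain->is_pinned || v->hard_weight == 1;
        }
    
        /* New semantics: restricted to the hardware domain. */
        static bool is_hwdom_pinned_vcpu(const struct vcpu *v)
        {
            return v->domain->is_hwdom && v->hard_weight == 1;
        }
    
        int main(void)
        {
            struct domain domu = { .is_pinned = false, .is_hwdom = false };
            struct vcpu v = { .domain = &domu, .hard_weight = 1 };
    
            /* A domU vcpu pinned by admin choice passed the old check,
             * but is rejected by the new one: prints "old 1, new 0". */
            printf("old %d, new %d\n",
                   old_is_pinned_vcpu(&v), is_hwdom_pinned_vcpu(&v));
            return 0;
        }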
    
    Rework "dom0_vcpus_pin" so that it only provides an initial pinning
    configuration, and permit full adjustment.  This allows the user to
    reconfigure dom0 after the fact or fix up from the fallout of cpu hot unplug
    and cpupool manipulation.
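    
    Concretely, once this change is in place, an admin who booted with
    "dom0_vcpus_pin" can relax or re-establish the pinning from the
    toolstack.  Illustrative xl invocations (the cpu numbers are examples,
    assuming the standard xl toolstack):
    
        xl vcpu-pin 0 all all   # drop the 1:1 pinning for every dom0 vcpu
        xl vcpu-pin 0 0 0       # re-pin dom0's vcpu 0 to pcpu 0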
    
    An unprivileged domain has no business using VCPUOP_get_physid, and
    shouldn't be able to use it just because it happens to be pinned by admin
    choice.  All uses of is_pinned_vcpu() should be restricted to the hardware
    domain, so rename it to is_hwdom_pinned_vcpu() to avoid future misuse.
    
    Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
    Reviewed-by: Dario Faggioli <dfaggioli@xxxxxxxx>
---
 xen/arch/x86/dom0_build.c      |  2 +-
 xen/arch/x86/domain.c          |  2 +-
 xen/arch/x86/pv/emul-priv-op.c |  9 ++++-----
 xen/common/domain.c            |  1 -
 xen/common/schedule.c          |  5 +----
 xen/include/xen/sched.h        | 10 ++++++----
 6 files changed, 13 insertions(+), 16 deletions(-)

diff --git a/xen/arch/x86/dom0_build.c b/xen/arch/x86/dom0_build.c
index 6ebe36766b..73f5407b0d 100644
--- a/xen/arch/x86/dom0_build.c
+++ b/xen/arch/x86/dom0_build.c
@@ -213,7 +213,7 @@ struct vcpu *__init dom0_setup_vcpu(struct domain *d,
         }
         else
         {
-            if ( !d->is_pinned && !dom0_affinity_relaxed )
+            if ( !opt_dom0_vcpus_pin && !dom0_affinity_relaxed )
                 sched_set_affinity(v, &dom0_cpus, NULL);
             sched_set_affinity(v, NULL, &dom0_cpus);
         }
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 8d579e2cf9..20b86fd568 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -1229,7 +1229,7 @@ arch_do_vcpu_op(
         struct vcpu_get_physid cpu_id;
 
         rc = -EINVAL;
-        if ( !is_pinned_vcpu(v) )
+        if ( !is_hwdom_pinned_vcpu(v) )
             break;
 
         cpu_id.phys_id =
diff --git a/xen/arch/x86/pv/emul-priv-op.c b/xen/arch/x86/pv/emul-priv-op.c
index 3746e2ad54..84ce67c641 100644
--- a/xen/arch/x86/pv/emul-priv-op.c
+++ b/xen/arch/x86/pv/emul-priv-op.c
@@ -1017,7 +1017,7 @@ static int write_msr(unsigned int reg, uint64_t val,
         if ( boot_cpu_data.x86_vendor != X86_VENDOR_AMD ||
              boot_cpu_data.x86 < 0x10 || boot_cpu_data.x86 > 0x17 )
             break;
-        if ( !is_hardware_domain(currd) || !is_pinned_vcpu(curr) )
+        if ( !is_hwdom_pinned_vcpu(curr) )
             return X86EMUL_OKAY;
         if ( (rdmsr_safe(MSR_AMD64_NB_CFG, temp) != 0) ||
              ((val ^ temp) & ~(1ULL << AMD64_NB_CFG_CF8_EXT_ENABLE_BIT)) )
@@ -1030,7 +1030,7 @@ static int write_msr(unsigned int reg, uint64_t val,
         if ( boot_cpu_data.x86_vendor != X86_VENDOR_AMD ||
              boot_cpu_data.x86 < 0x10 || boot_cpu_data.x86 > 0x17 )
             break;
-        if ( !is_hardware_domain(currd) || !is_pinned_vcpu(curr) )
+        if ( !is_hwdom_pinned_vcpu(curr) )
             return X86EMUL_OKAY;
         if ( rdmsr_safe(MSR_FAM10H_MMIO_CONF_BASE, temp) != 0 )
             break;
@@ -1050,7 +1050,7 @@ static int write_msr(unsigned int reg, uint64_t val,
     case MSR_IA32_UCODE_REV:
         if ( boot_cpu_data.x86_vendor != X86_VENDOR_INTEL )
             break;
-        if ( !is_hardware_domain(currd) || !is_pinned_vcpu(curr) )
+        if ( !is_hwdom_pinned_vcpu(curr) )
             return X86EMUL_OKAY;
         if ( rdmsr_safe(reg, temp) )
             break;
@@ -1086,8 +1086,7 @@ static int write_msr(unsigned int reg, uint64_t val,
     case MSR_IA32_ENERGY_PERF_BIAS:
         if ( boot_cpu_data.x86_vendor != X86_VENDOR_INTEL )
             break;
-        if ( !is_hardware_domain(currd) || !is_pinned_vcpu(curr) ||
-             wrmsr_safe(reg, val) == 0 )
+        if ( !is_hwdom_pinned_vcpu(curr) || wrmsr_safe(reg, val) == 0 )
             return X86EMUL_OKAY;
         break;
 
diff --git a/xen/common/domain.c b/xen/common/domain.c
index 3b18f11f12..a1f8bb4f2f 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -342,7 +342,6 @@ struct domain *domain_create(domid_t domid,
         if ( hardware_domid < 0 || hardware_domid >= DOMID_FIRST_RESERVED )
             panic("The value of hardware_dom must be a valid domain ID\n");
 
-        d->is_pinned = opt_dom0_vcpus_pin;
         d->disable_migrate = 1;
         old_hwdom = hardware_domain;
         hardware_domain = d;
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index 60755a631e..76d60785e2 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -276,7 +276,7 @@ int sched_init_vcpu(struct vcpu *v, unsigned int processor)
      * Initialize affinity settings. The idler, and potentially
      * domain-0 VCPUs, are pinned onto their respective physical CPUs.
      */
-    if ( is_idle_domain(d) || d->is_pinned )
+    if ( is_idle_domain(d) || (is_hardware_domain(d) && opt_dom0_vcpus_pin) )
         sched_set_affinity(v, cpumask_of(processor), &cpumask_all);
     else
         sched_set_affinity(v, &cpumask_all, &cpumask_all);
@@ -958,9 +958,6 @@ int vcpu_set_hard_affinity(struct vcpu *v, const cpumask_t *affinity)
     cpumask_t online_affinity;
     cpumask_t *online;
 
-    if ( v->domain->is_pinned )
-        return -EINVAL;
-
     online = VCPU2ONLINE(v);
     cpumask_and(&online_affinity, affinity, online);
     if ( cpumask_empty(&online_affinity) )
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index edee52dfe4..6d23b6d873 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -381,8 +381,6 @@ struct domain
     bool             is_console;
     /* Is this a xenstore domain (not dom0)? */
     bool             is_xenstore;
-    /* Domain's VCPUs are pinned 1:1 to physical CPUs? */
-    bool             is_pinned;
     /* Non-migratable and non-restoreable? */
     bool             disable_migrate;
     /* Is this guest being debugged by dom0? */
@@ -961,8 +959,12 @@ static inline bool is_hvm_vcpu(const struct vcpu *v)
     return is_hvm_domain(v->domain);
 }
 
-#define is_pinned_vcpu(v) ((v)->domain->is_pinned || \
-                           cpumask_weight((v)->cpu_hard_affinity) == 1)
+static inline bool is_hwdom_pinned_vcpu(const struct vcpu *v)
+{
+    return (is_hardware_domain(v->domain) &&
+            cpumask_weight(v->cpu_hard_affinity) == 1);
+}
+
 #ifdef CONFIG_HAS_PASSTHROUGH
 #define has_iommu_pt(d) (dom_iommu(d)->status != IOMMU_STATUS_disabled)
 #define need_iommu_pt_sync(d) (dom_iommu(d)->need_sync)
--
generated by git-patchbot for /home/xen/git/xen.git#staging


 

