[Xen-changelog] [xen-unstable] reflect cpupool in numa node affinity
# HG changeset patch
# User Juergen Gross <juergen.gross@xxxxxxxxxxxxxx>
# Date 1327414872 0
# Node ID 0f8f633dbe2d182d448214fefb4e2e3afaa75e7e
# Parent 455a50622fb2a135e6bbfce8705fddc8705fe2d7
reflect cpupool in numa node affinity
In order to prefer node-local memory for a domain, the NUMA node
locality information must be built according to the CPUs belonging to
the domain's cpupool.
Signed-off-by: juergen.gross@xxxxxxxxxxxxxx
Committed-by: Keir Fraser <keir@xxxxxxx>
---
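The heart of this change is in domain_update_node_affinity(): each
vcpu's CPU affinity is now intersected with the online CPUs of the
domain's cpupool before the union of those masks is mapped onto NUMA
nodes. Below is a minimal standalone sketch of that computation,
modelling the masks as plain 64-bit words; NR_NODES, node_cpus() and
the example topology are illustrative assumptions, not Xen's actual
cpumask_t/nodemask_t machinery.

/* Sketch: vcpu affinities are AND-ed with the pool's online CPUs,
 * OR-ed together, and the result mapped to node bits. */
#include <stdint.h>
#include <stdio.h>

#define NR_NODES 2

/* Illustrative topology: CPUs 0-3 on node 0, CPUs 4-7 on node 1. */
static uint64_t node_cpus(unsigned int node)
{
    return node ? 0xF0 : 0x0F;
}

static uint64_t node_affinity(const uint64_t *vcpu_affinity,
                              unsigned int nr_vcpus, uint64_t pool_online)
{
    uint64_t cpumask = 0, nodemask = 0;
    unsigned int i, node;

    for ( i = 0; i < nr_vcpus; i++ )
        cpumask |= vcpu_affinity[i] & pool_online; /* the new AND step */

    for ( node = 0; node < NR_NODES; node++ )
        if ( node_cpus(node) & cpumask )
            nodemask |= (uint64_t)1 << node;

    return nodemask;
}

int main(void)
{
    /* Two vcpus allowed on every CPU, but the pool owns only CPUs 0-3:
     * without the AND step the domain would appear to span both nodes;
     * with it the node affinity collapses to node 0 (mask 0x1). */
    uint64_t affinity[2] = { 0xFF, 0xFF };

    printf("node affinity: %#llx\n",
           (unsigned long long)node_affinity(affinity, 2, 0x0F));
    return 0;
}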
diff -r 455a50622fb2 -r 0f8f633dbe2d xen/common/cpupool.c
--- a/xen/common/cpupool.c Tue Jan 24 14:20:40 2012 +0000
+++ b/xen/common/cpupool.c Tue Jan 24 14:21:12 2012 +0000
@@ -220,6 +220,7 @@
 {
     int ret;
     struct cpupool *old;
+    struct domain *d;
 
     if ( (cpupool_moving_cpu == cpu) && (c != cpupool_cpu_moving) )
         return -EBUSY;
@@ -240,6 +241,14 @@
         cpupool_cpu_moving = NULL;
     }
     cpumask_set_cpu(cpu, c->cpu_valid);
+
+    rcu_read_lock(&domlist_read_lock);
+    for_each_domain_in_cpupool(d, c)
+    {
+        domain_update_node_affinity(d);
+    }
+    rcu_read_unlock(&domlist_read_lock);
+
     return 0;
 }
 
diff -r 455a50622fb2 -r 0f8f633dbe2d xen/common/domain.c
--- a/xen/common/domain.c Tue Jan 24 14:20:40 2012 +0000
+++ b/xen/common/domain.c Tue Jan 24 14:21:12 2012 +0000
@@ -11,6 +11,7 @@
 #include <xen/ctype.h>
 #include <xen/errno.h>
 #include <xen/sched.h>
+#include <xen/sched-if.h>
 #include <xen/domain.h>
 #include <xen/mm.h>
 #include <xen/event.h>
@@ -335,17 +336,29 @@
 void domain_update_node_affinity(struct domain *d)
 {
     cpumask_var_t cpumask;
+    cpumask_var_t online_affinity;
+    const cpumask_t *online;
     nodemask_t nodemask = NODE_MASK_NONE;
     struct vcpu *v;
     unsigned int node;
 
     if ( !zalloc_cpumask_var(&cpumask) )
         return;
+    if ( !alloc_cpumask_var(&online_affinity) )
+    {
+        free_cpumask_var(cpumask);
+        return;
+    }
+
+    online = cpupool_online_cpumask(d->cpupool);
 
     spin_lock(&d->node_affinity_lock);
 
     for_each_vcpu ( d, v )
-        cpumask_or(cpumask, cpumask, v->cpu_affinity);
+    {
+        cpumask_and(online_affinity, v->cpu_affinity, online);
+        cpumask_or(cpumask, cpumask, online_affinity);
+    }
 
     for_each_online_node ( node )
         if ( cpumask_intersects(&node_to_cpumask(node), cpumask) )
@@ -354,6 +367,7 @@
 
     d->node_affinity = nodemask;
     spin_unlock(&d->node_affinity_lock);
+    free_cpumask_var(online_affinity);
     free_cpumask_var(cpumask);
 }
 
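Note the asymmetric allocation above: cpumask must start out zeroed
(zalloc_cpumask_var) because it only accumulates bits via cpumask_or(),
while online_affinity is fully overwritten by cpumask_and() on every
iteration, so the non-zeroing alloc_cpumask_var() suffices. If that
second allocation fails, the already-allocated cpumask is freed before
bailing out.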
diff -r 455a50622fb2 -r 0f8f633dbe2d xen/common/schedule.c
--- a/xen/common/schedule.c Tue Jan 24 14:20:40 2012 +0000
+++ b/xen/common/schedule.c Tue Jan 24 14:21:12 2012 +0000
@@ -280,12 +280,13 @@
         SCHED_OP(VCPU2OP(v), insert_vcpu, v);
     }
 
-    domain_update_node_affinity(d);
 
     d->cpupool = c;
     SCHED_OP(DOM2OP(d), free_domdata, d->sched_priv);
     d->sched_priv = domdata;
 
+    domain_update_node_affinity(d);
+
     domain_unpause(d);
 
     xfree(vcpu_priv);
@@ -535,7 +536,6 @@
     struct cpupool *c;
     cpumask_t online_affinity;
     int ret = 0;
-    bool_t affinity_broken;
 
     c = per_cpu(cpupool, cpu);
     if ( c == NULL )
@@ -543,8 +543,6 @@
 
     for_each_domain_in_cpupool ( d, c )
     {
-        affinity_broken = 0;
-
         for_each_vcpu ( d, v )
         {
             vcpu_schedule_lock_irq(v);
@@ -556,7 +554,6 @@
                 printk("Breaking vcpu affinity for domain %d vcpu %d\n",
                        v->domain->domain_id, v->vcpu_id);
                 cpumask_setall(v->cpu_affinity);
-                affinity_broken = 1;
             }
 
             if ( v->processor == cpu )
@@ -580,8 +577,7 @@
             ret = -EAGAIN;
         }
 
-        if ( affinity_broken )
-            domain_update_node_affinity(d);
+        domain_update_node_affinity(d);
     }
 
     return ret;
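Two points in the schedule.c hunks are worth spelling out. In
sched_move_domain() the domain_update_node_affinity() call moves to
after d->cpupool is switched, so the recomputation sees the CPUs of the
new pool rather than the old one. In cpu_disable_scheduler() the
affinity_broken short-cut goes away: node affinity now depends on the
pool's online CPUs, which have just changed, so it must be recomputed
for every domain in the pool even when no vcpu's affinity had to be
broken.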