
[Xen-changelog] [xen-unstable] cpufreq: domain structure update from array to linked list



# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1225275253 0
# Node ID 2c7665f04038e4282e4f7de11716ba441d0f12c7
# Parent  19549b9766fdd68380ded8efd975c41269ab2801
cpufreq: domain structure update from array to linked list

The cpufreq coordination domain is currently tracked in a fixed-size
array, cpufreq_dom_map[NR_CPUS], indexed by domain number. However,
domain numbers may be sparse or larger than NR_CPUS, so replace the
array with a dynamically allocated linked list of per-domain
structures.
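
After this change each coordination domain gets a small heap-allocated
node, created when the first CPU of the domain registers and freed when
the last one goes away (field comments added here for illustration):

struct cpufreq_dom {
    unsigned int       dom;    /* coordination domain number */
    cpumask_t          map;    /* CPUs of this domain seen so far */
    struct list_head   node;   /* linkage on cpufreq_dom_list_head */
};
static LIST_HEAD(cpufreq_dom_list_head);

Lookup becomes a linear list_for_each() scan matching on ->dom, so a
domain number is only a search key and no longer an array index.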

Signed-off-by: Jinsong Liu <jinsong.liu@xxxxxxxxx>
---
 xen/drivers/cpufreq/cpufreq.c |   99 ++++++++++++++++++++++++++++++------------
 1 file changed, 72 insertions(+), 27 deletions(-)

diff -r 19549b9766fd -r 2c7665f04038 xen/drivers/cpufreq/cpufreq.c
--- a/xen/drivers/cpufreq/cpufreq.c     Tue Oct 28 11:25:20 2008 +0000
+++ b/xen/drivers/cpufreq/cpufreq.c     Wed Oct 29 10:14:13 2008 +0000
@@ -31,6 +31,7 @@
 #include <xen/errno.h>
 #include <xen/delay.h>
 #include <xen/cpumask.h>
+#include <xen/list.h>
 #include <xen/sched.h>
 #include <xen/timer.h>
 #include <xen/xmalloc.h>
@@ -44,8 +45,12 @@
 #include <acpi/acpi.h>
 #include <acpi/cpufreq/cpufreq.h>
 
-/* TODO: change to link list later as domain number may be sparse */
-static cpumask_t cpufreq_dom_map[NR_CPUS];
+struct cpufreq_dom {
+    unsigned int       dom;
+    cpumask_t          map;
+    struct list_head   node;
+};
+static LIST_HEAD(cpufreq_dom_list_head);
 
 int cpufreq_limit_change(unsigned int cpu)
 {
@@ -72,48 +77,71 @@ int cpufreq_add_cpu(unsigned int cpu)
 {
     int ret = 0;
     unsigned int firstcpu;
-    unsigned int dom;
+    unsigned int dom, domexist = 0;
     unsigned int j;
+    struct list_head *pos;
+    struct cpufreq_dom *cpufreq_dom;
     struct cpufreq_policy new_policy;
     struct cpufreq_policy *policy;
     struct processor_performance *perf = &processor_pminfo[cpu]->perf;
 
     /* to protect the case when Px was not controlled by xen */
-    if (!processor_pminfo[cpu] || !(perf->init & XEN_PX_INIT))
+    if (!processor_pminfo[cpu]      ||
+        !(perf->init & XEN_PX_INIT) ||
+        !cpu_online(cpu))
+        return -EINVAL;
+
+    if (cpufreq_cpu_policy[cpu])
         return 0;
-
-    if (!cpu_online(cpu) || cpufreq_cpu_policy[cpu])
-        return -EINVAL;
 
     ret = cpufreq_statistic_init(cpu);
     if (ret)
         return ret;
 
     dom = perf->domain_info.domain;
-    if (cpus_weight(cpufreq_dom_map[dom])) {
+
+    list_for_each(pos, &cpufreq_dom_list_head) {
+        cpufreq_dom = list_entry(pos, struct cpufreq_dom, node);
+        if (dom == cpufreq_dom->dom) {
+            domexist = 1;
+            break;
+        }
+    }
+
+    if (domexist) {
         /* share policy with the first cpu since on same boat */
-        firstcpu = first_cpu(cpufreq_dom_map[dom]);
+        firstcpu = first_cpu(cpufreq_dom->map);
         policy = cpufreq_cpu_policy[firstcpu];
 
         cpufreq_cpu_policy[cpu] = policy;
-        cpu_set(cpu, cpufreq_dom_map[dom]);
+        cpu_set(cpu, cpufreq_dom->map);
         cpu_set(cpu, policy->cpus);
 
         printk(KERN_EMERG"adding CPU %u\n", cpu);
     } else {
+        cpufreq_dom = xmalloc(struct cpufreq_dom);
+        if (!cpufreq_dom) {
+            cpufreq_statistic_exit(cpu);
+            return -ENOMEM;
+        }
+        memset(cpufreq_dom, 0, sizeof(struct cpufreq_dom));
+        cpufreq_dom->dom = dom;
+        cpu_set(cpu, cpufreq_dom->map);
+        list_add(&cpufreq_dom->node, &cpufreq_dom_list_head);
+
         /* for the first cpu, setup policy and do init work */
         policy = xmalloc(struct cpufreq_policy);
         if (!policy) {
+            list_del(&cpufreq_dom->node);
+            xfree(cpufreq_dom);
             cpufreq_statistic_exit(cpu);
             return -ENOMEM;
         }
         memset(policy, 0, sizeof(struct cpufreq_policy));
-
+        policy->cpu = cpu;
+        cpu_set(cpu, policy->cpus);
         cpufreq_cpu_policy[cpu] = policy;
-        cpu_set(cpu, cpufreq_dom_map[dom]);
-        cpu_set(cpu, policy->cpus);
-
-        policy->cpu = cpu;
+
         ret = cpufreq_driver->init(policy);
         if (ret)
             goto err1;
@@ -124,7 +152,7 @@ int cpufreq_add_cpu(unsigned int cpu)
      * After get full cpumap of the coordination domain,
      * we can safely start gov here.
      */
-    if (cpus_weight(cpufreq_dom_map[dom]) ==
+    if (cpus_weight(cpufreq_dom->map) ==
         perf->domain_info.num_processors) {
         memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));
         policy->governor = NULL;
@@ -138,51 +166,68 @@ err2:
 err2:
     cpufreq_driver->exit(policy);
 err1:
-    for_each_cpu_mask(j, cpufreq_dom_map[dom]) {
+    for_each_cpu_mask(j, cpufreq_dom->map) {
         cpufreq_cpu_policy[j] = NULL;
         cpufreq_statistic_exit(j);
     }
 
-    cpus_clear(cpufreq_dom_map[dom]);
+    list_del(&cpufreq_dom->node);
+    xfree(cpufreq_dom);
     xfree(policy);
     return ret;
 }
 
 int cpufreq_del_cpu(unsigned int cpu)
 {
-    unsigned int dom;
+    unsigned int dom, domexist = 0;
+    struct list_head *pos;
+    struct cpufreq_dom *cpufreq_dom;
     struct cpufreq_policy *policy;
     struct processor_performance *perf = &processor_pminfo[cpu]->perf;
 
     /* to protect the case when Px was not controlled by xen */
-    if (!processor_pminfo[cpu] || !(perf->init & XEN_PX_INIT))
+    if (!processor_pminfo[cpu]      ||
+        !(perf->init & XEN_PX_INIT) ||
+        !cpu_online(cpu))
+        return -EINVAL;
+
+    if (!cpufreq_cpu_policy[cpu])
         return 0;
-
-    if (!cpu_online(cpu) || !cpufreq_cpu_policy[cpu])
-        return -EINVAL;
 
     dom = perf->domain_info.domain;
     policy = cpufreq_cpu_policy[cpu];
 
-    printk(KERN_EMERG"deleting CPU %u\n", cpu);
+    list_for_each(pos, &cpufreq_dom_list_head) {
+        cpufreq_dom = list_entry(pos, struct cpufreq_dom, node);
+        if (dom == cpufreq_dom->dom) {
+            domexist = 1;
+            break;
+        }
+    }
+
+    if (!domexist)
+        return -EINVAL;
 
     /* for the first cpu of the domain, stop gov */
-    if (cpus_weight(cpufreq_dom_map[dom]) ==
+    if (cpus_weight(cpufreq_dom->map) ==
         perf->domain_info.num_processors)
         __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
 
     cpufreq_cpu_policy[cpu] = NULL;
     cpu_clear(cpu, policy->cpus);
-    cpu_clear(cpu, cpufreq_dom_map[dom]);
+    cpu_clear(cpu, cpufreq_dom->map);
     cpufreq_statistic_exit(cpu);
 
     /* for the last cpu of the domain, clean room */
     /* It's safe here to free freq_table, drv_data and policy */
-    if (!cpus_weight(cpufreq_dom_map[dom])) {
+    if (!cpus_weight(cpufreq_dom->map)) {
         cpufreq_driver->exit(policy);
+        list_del(&cpufreq_dom->node);
+        xfree(cpufreq_dom);
         xfree(policy);
     }
 
+    printk(KERN_EMERG"deleting CPU %u\n", cpu);
     return 0;
 }
 

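For readers unfamiliar with the intrusive-list idiom used above:
xen/list.h provides Linux-style list macros in which the linkage node
is embedded in the payload structure and list_entry() recovers the
containing structure from a pointer to that node. Below is a minimal
standalone sketch of the same pattern; the list helpers are cut-down
re-implementations given only for illustration, and the
find_dom()/main() demo is hypothetical, not part of the patch:

/* Standalone sketch of the cpufreq_dom list pattern.  The helpers
 * below mimic the xen/list.h (Linux-style) intrusive-list API. */
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

struct list_head {
    struct list_head *next, *prev;
};

/* Define and self-initialise an empty list head. */
#define LIST_HEAD(name) struct list_head name = { &(name), &(name) }

static void list_add(struct list_head *new, struct list_head *head)
{
    new->next = head->next;
    new->prev = head;
    head->next->prev = new;
    head->next = new;
}

static void list_del(struct list_head *entry)
{
    entry->prev->next = entry->next;
    entry->next->prev = entry->prev;
}

/* Recover the containing structure from its embedded list node. */
#define list_entry(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

#define list_for_each(pos, head) \
    for ((pos) = (head)->next; (pos) != (head); (pos) = (pos)->next)

/* Simplified stand-in for struct cpufreq_dom (no cpumask here). */
struct cpufreq_dom {
    unsigned int     dom;
    struct list_head node;
};

static LIST_HEAD(cpufreq_dom_list_head);

/* Linear lookup keyed on ->dom, as in cpufreq_add_cpu()/del_cpu(). */
static struct cpufreq_dom *find_dom(unsigned int dom)
{
    struct list_head *pos;

    list_for_each(pos, &cpufreq_dom_list_head) {
        struct cpufreq_dom *d = list_entry(pos, struct cpufreq_dom, node);
        if (d->dom == dom)
            return d;
    }
    return NULL;
}

int main(void)
{
    /* Domain numbers can be sparse; nothing ties them to NR_CPUS. */
    unsigned int doms[] = { 0, 5, 1000000 };

    for (size_t i = 0; i < sizeof(doms) / sizeof(doms[0]); i++) {
        struct cpufreq_dom *d = calloc(1, sizeof(*d));
        if (!d)
            return 1;
        d->dom = doms[i];
        list_add(&d->node, &cpufreq_dom_list_head);
    }

    printf("dom 1000000 %s\n", find_dom(1000000) ? "found" : "missing");

    /* Tear down, as cpufreq_del_cpu() does for the last CPU. */
    while (cpufreq_dom_list_head.next != &cpufreq_dom_list_head) {
        struct cpufreq_dom *d = list_entry(cpufreq_dom_list_head.next,
                                           struct cpufreq_dom, node);
        list_del(&d->node);
        free(d);
    }
    return 0;
}

Built with e.g. "cc -std=c99 -Wall dom_list.c", this prints
"dom 1000000 found": because the list is keyed on ->dom rather than
indexed by it, a sparse domain number needs no NR_CPUS-sized table.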