[Xen-changelog] This patch addresses Rusty's suggestions. Most visibly:

# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID a5994e258f777703b46a8433238bd72c8bf8c1cb
# Parent  2f75dac09365959d87709d15a181201abf189cb8
This patch addresses Rusty's suggestions.  Most visibly:

Entries in the store:
    /cpus/cpuX/online -> /cpu/X/availability
Values:
    0|1 -> online|offline

Signed-off-by: Ryan Harper <ryanh@xxxxxxxxxx>

diff -r 2f75dac09365 -r a5994e258f77 linux-2.6-xen-sparse/arch/xen/i386/kernel/smpboot.c
--- a/linux-2.6-xen-sparse/arch/xen/i386/kernel/smpboot.c      Thu Aug 11 18:03:22 2005
+++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/smpboot.c      Thu Aug 11 20:57:09 2005
@@ -1353,83 +1353,82 @@
         printk(KERN_ALERT "Error creating hotplug_cpu process!\n");
 }
 
-static void handle_cpus_watch(struct xenbus_watch *, const char *);
-static struct notifier_block xsn_cpus;
+static void handle_vcpu_hotplug_event(struct xenbus_watch *, const char *);
+static struct notifier_block xsn_cpu;
 
 /* xenbus watch struct */
-static struct xenbus_watch cpus_watch = {
-        .node = "cpus",
-        .callback = handle_cpus_watch,
+static struct xenbus_watch cpu_watch = {
+        .node = "cpu",
+        .callback = handle_vcpu_hotplug_event
 };
 
-static int setup_cpus_watcher(struct notifier_block *notifier,
+/* NB: Assumes xenbus_lock is held! */
+static int setup_cpu_watcher(struct notifier_block *notifier,
                               unsigned long event, void *data)
 {
         int err = 0;
 
-        down(&xenbus_lock);
-        err = register_xenbus_watch(&cpus_watch);
-        up(&xenbus_lock);
+        BUG_ON(down_trylock(&xenbus_lock) == 0);
+        err = register_xenbus_watch(&cpu_watch);
 
         if (err) {
-                printk("Failed to set cpus watcher\n");
-        }
+                printk("Failed to register watch on /cpu\n");
+        }
+
         return NOTIFY_DONE;
 }
 
-static void handle_cpus_watch(struct xenbus_watch *watch, const char *node)
+static void handle_vcpu_hotplug_event(struct xenbus_watch *watch, const char *node)
 {
         static DECLARE_WORK(vcpu_hotplug_work, __vcpu_hotplug_handler, NULL);
         struct vcpu_hotplug_handler_t *handler = &vcpu_hotplug_handler;
         ssize_t ret;
-        int err, cpu, state;
+        int err, cpu;
+        char state[8];
         char dir[32];
         char *cpustr;
 
-        /* get a pointer to start of cpus/cpu string */
-        if ((cpustr = strstr(node, "cpus/cpu")) != NULL) {
+        /* get a pointer to start of cpu string */
+        if ((cpustr = strstr(node, "cpu/")) != NULL) {
                 /* find which cpu state changed, note vcpu for handler */
-                sscanf(cpustr, "cpus/cpu%d", &cpu);
+                sscanf(cpustr, "cpu/%d", &cpu);
                 handler->vcpu = cpu;
 
                 /* calc the dir for xenbus read */
-                sprintf(dir, "cpus/cpu%d", cpu);
-
-                /* make sure watch that was triggered is changes to the online key */
-                if ((strcmp(node + strlen(dir), "/online")) != 0)
+                sprintf(dir, "cpu/%d", cpu);
+
+                /* make sure watch that was triggered is changes to the correct key */
+                if ((strcmp(node + strlen(dir), "/availability")) != 0)
                         return;
 
                 /* get the state value */
-                xenbus_transaction_start("cpus");
-                err = xenbus_scanf(dir, "online", "%d", &state);
+                xenbus_transaction_start("cpu");
+                err = xenbus_scanf(dir, "availability", "%s", state);
                 xenbus_transaction_end(0);
 
                 if (err != 1) {
                         printk(KERN_ERR
-                               "XENBUS: Unable to read cpu online state\n");
+                               "XENBUS: Unable to read cpu state\n");
                         return;
                 }
 
                 /* if we detect a state change, take action */
-                switch (state) {
+                if (strcmp(state, "online") == 0) {
                         /* offline -> online */
-                case 1:
                         if (!cpu_isset(cpu, cpu_online_map)) {
                                 handler->fn = (void *)&cpu_up;
                                 ret = schedule_work(&vcpu_hotplug_work);
-                        }
-                        break;
+                        }
+                } else if (strcmp(state, "offline") == 0) {
                         /* online -> offline */
-                case 0:
                         if (cpu_isset(cpu, cpu_online_map)) {
                                 handler->fn = (void *)&cpu_down;
                                 ret = schedule_work(&vcpu_hotplug_work);
-                        }
-                        break;
-                default:
+                        }
+                } else {
                         printk(KERN_ERR
-                               "XENBUS: unknown state(%d) on node(%s)\n", state,
+                               "XENBUS: unknown state(%s) on node(%s)\n", state,
                                node);
                 }
         }
@@ -1438,13 +1437,9 @@
 
 static int __init setup_vcpu_hotplug_event(void)
 {
-        xsn_cpus.notifier_call = setup_cpus_watcher;
-
-        if (xen_start_info.store_evtchn) {
-                setup_cpus_watcher(&xsn_cpus, 0, NULL);
-        } else {
-                register_xenstore_notifier(&xsn_cpus);
-        }
+        xsn_cpu.notifier_call = setup_cpu_watcher;
+
+        register_xenstore_notifier(&xsn_cpu);
 
         return 0;
 }
diff -r 2f75dac09365 -r a5994e258f77 tools/python/xen/xend/XendDomainInfo.py
--- a/tools/python/xen/xend/XendDomainInfo.py   Thu Aug 11 18:03:22 2005
+++ b/tools/python/xen/xend/XendDomainInfo.py   Thu Aug 11 20:57:09 2005
@@ -557,12 +557,12 @@
 
     def exportVCPUSToDB(self, vcpus):
         for v in range(0,vcpus):
-            path = "/cpus/cpu%d"%(v)
+            path = "/cpu/%d"%(v)
             if not self.vcpusdb.has_key(path):
                 self.vcpusdb[path] = self.db.addChild(path)
             db = self.vcpusdb[path]
-            log.debug("writing key online=1 to path %s in store"%(path))
-            db['online'] = "1"
+            log.debug("writing key availability=online to path %s in store"%(path))
+            db['availability'] = "online"
             db.saveDB(save=True)
 
     def init_image(self):
@@ -951,16 +951,16 @@
         """
         db = ""
         try:
-            db = self.vcpusdb['/cpus/cpu%d'%(vcpu)]
+            db = self.vcpusdb['/cpu/%d'%(vcpu)]
         except:
             log.error("Invalid VCPU")
             return
 
         if self.store_channel:
             if int(state) == 0:
-                db['online'] = "0"
+                db['availability'] = "offline"
             else:
-                db['online'] = "1"
+                db['availability'] = "online"
 
         db.saveDB(save=True)
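
For reference, the renamed key can also be driven by hand from dom0, which is a convenient way to exercise the watch added above. The sketch below is illustrative only and is not part of this changeset: it assumes the xenstore-write command-line utility is available and that the per-domain entry ends up at /local/domain/<domid>/cpu/<N>/availability (that path prefix is an assumption, not something shown in this patch).

    # A minimal sketch, not part of this changeset: toggle a guest VCPU by
    # writing the new string values to its availability key.  The
    # /local/domain/<domid> prefix and the xenstore-write utility are
    # assumptions about the surrounding environment.
    import subprocess

    def set_vcpu_availability(domid, vcpu, state):
        """Write 'online' or 'offline' to the per-VCPU availability key."""
        if state not in ("online", "offline"):
            raise ValueError("state must be 'online' or 'offline'")
        path = "/local/domain/%d/cpu/%d/availability" % (domid, vcpu)
        subprocess.call(["xenstore-write", path, state])

    # Example: take VCPU 1 of domain 3 offline, then bring it back online.
    # set_vcpu_availability(3, 1, "offline")
    # set_vcpu_availability(3, 1, "online")

Writing "offline" fires the cpu_watch registered in smpboot.c, and handle_vcpu_hotplug_event() schedules cpu_down() for the matching VCPU; writing "online" schedules cpu_up().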