[Xen-devel] [PATCH] Convert balloon driver to xenstore
The attached patch converts the balloon driver and xend to use xenstore instead of control messages.

Note: because there is no way to set a watch on a non-existent key, this patch includes a workaround to account for the fact that dom0's store keys are not initialized by the tools before it boots.

Signed-off-by: Dan Smith <danms@xxxxxxxxxx>
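For readers skimming the diff: the heart of the change is the watch logic sketched below. This is a condensed illustration only; it reuses the xenbus calls the patch itself introduces and omits the locking and init-ordering details, so it is not a drop-in replacement for the hunks that follow.

/* Condensed sketch of the watch fallback -- not the full patch. */
static void watch_target(struct xenbus_watch *watch, const char *node)
{
    unsigned long new_target;

    if(watch == &root_watch)
    {
        /* Something changed under "/": retry the real watch. */
        if(register_xenbus_watch(&xb_watch) != 0)
            return;                  /* memory/target still missing */
        unregister_xenbus_watch(&root_watch);
        balloon_xenbus_init = 1;
    }

    /* memory/target is written by xend in bytes; the balloon works in pages. */
    if(xenbus_scanf("memory", "target", "%lu", &new_target) != 1)
        return;

    set_new_target(new_target >> PAGE_SHIFT);
}

The full version below additionally defers setup until the store event channel is available (xen_start_info.store_evtchn) and serializes watch registration under xenbus_lock.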
diff -r 0db6a59abb95 -r b3ead8c9affb linux-2.6-xen-sparse/drivers/xen/balloon/balloon.c
--- a/linux-2.6-xen-sparse/drivers/xen/balloon/balloon.c  Mon Aug  1 10:48:24 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/balloon/balloon.c  Mon Aug  1 17:03:55 2005
@@ -5,6 +5,7 @@
  *
  * Copyright (c) 2003, B Dragovic
  * Copyright (c) 2003-2004, M Williamson, K Fraser
+ * Copyright (c) 2005 Dan M. Smith, IBM Corporation
  *
  * This file may be distributed separately from the Linux kernel, or
  * incorporated into other software packages, subject to the following license:
@@ -42,7 +43,6 @@
 #include <linux/vmalloc.h>
 #include <asm-xen/xen_proc.h>
 #include <asm-xen/hypervisor.h>
-#include <asm-xen/ctrl_if.h>
 #include <asm-xen/balloon.h>
 #include <asm/pgalloc.h>
 #include <asm/pgtable.h>
@@ -50,6 +50,10 @@
 #include <asm/tlb.h>
 #include <linux/list.h>
 
+#include <asm-xen/xenbus.h>
+
+#define PAGES2KB(_p) ((_p)<<(PAGE_SHIFT-10))
+
 static struct proc_dir_entry *balloon_pde;
 
 static DECLARE_MUTEX(balloon_mutex);
@@ -77,11 +81,17 @@
 static DECLARE_WORK(balloon_worker, balloon_process, NULL);
 static struct timer_list balloon_timer;
 
+/* Flag for dom0 xenstore workaround */
+static int balloon_xenbus_init=0;
+
+/* Init Function */
+void balloon_init_watcher(void);
+
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
 /* Use the private and mapping fields of struct page as a list. */
 #define PAGE_TO_LIST(p) ( (struct list_head *)&p->private )
 #define LIST_TO_PAGE(l) ( list_entry( ((unsigned long *)l), \
-			       struct page, private ) )
+                               struct page, private ) )
 #define UNLIST_PAGE(p)  do { list_del(PAGE_TO_LIST(p)); \
                              p->mapping = NULL;         \
                              p->private = 0; } while(0)
@@ -297,25 +307,96 @@
     schedule_work(&balloon_worker);
 }
 
-static void balloon_ctrlif_rx(ctrl_msg_t *msg, unsigned long id)
-{
-    switch ( msg->subtype )
-    {
-    case CMSG_MEM_REQUEST_SET:
-    {
-        mem_request_t *req = (mem_request_t *)&msg->msg[0];
-        set_new_target(req->target);
-        req->status = 0;
-    }
-    break;
-
-    default:
-        msg->length = 0;
-        break;
-    }
-
-    ctrl_if_send_response(msg);
-}
+static struct xenbus_watch xb_watch =
+{
+    .node = "memory"
+};
+
+/* FIXME: This is part of a dom0 sequencing workaround */
+static struct xenbus_watch root_watch =
+{
+    .node = "/"
+};
+
+/* React to a change in the target key */
+static void watch_target(struct xenbus_watch *watch, const char *node)
+{
+    unsigned long new_target;
+    int err;
+
+    if(watch == &root_watch)
+    {
+        /* FIXME: This is part of a dom0 sequencing workaround */
+        if(register_xenbus_watch(&xb_watch) == 0)
+        {
+            /*
+              We successfully set a watch on memory/target:
+              now we can stop watching root
+            */
+            unregister_xenbus_watch(&root_watch);
+            balloon_xenbus_init=1;
+        }
+        else
+        {
+            return;
+        }
+    }
+
+    err = xenbus_scanf("memory", "target", "%lu", &new_target);
+
+    if(err != 1)
+    {
+        IPRINTK("Unable to read memory/target\n");
+        return;
+    }
+
+    set_new_target(new_target >> PAGE_SHIFT);
+
+}
+
+/*
+  Try to set up our watcher, if not already set
+
+*/
+void balloon_init_watcher(void)
+{
+    int err;
+
+    if(!xen_start_info.store_evtchn)
+    {
+        IPRINTK("Delaying watcher init until xenstore is available\n");
+        return;
+    }
+
+    down(&xenbus_lock);
+
+    if(!balloon_xenbus_init)
+    {
+        err = register_xenbus_watch(&xb_watch);
+        if(err)
+        {
+            /* BIG FAT FIXME: dom0 sequencing workaround
+             * dom0 can't set a watch on memory/target until
+             * after the tools create it.  So, we have to watch
+             * the whole store until that happens.
+             *
+             * This will go away when we have the ability to watch
+             * non-existent keys
+             */
+            register_xenbus_watch(&root_watch);
+        }
+        else
+        {
+            IPRINTK("Balloon xenbus watcher initialized\n");
+            balloon_xenbus_init = 1;
+        }
+    }
+
+    up(&xenbus_lock);
+
+}
+
+EXPORT_SYMBOL(balloon_init_watcher);
 
 static int balloon_write(struct file *file, const char __user *buffer,
                          unsigned long count, void *data)
@@ -346,7 +427,6 @@
 {
     int len;
 
-#define K(_p) ((_p)<<(PAGE_SHIFT-10))
     len = sprintf(
         page,
         "Current allocation: %8lu kB\n"
@@ -354,13 +434,14 @@
         "Low-mem balloon:    %8lu kB\n"
         "High-mem balloon:   %8lu kB\n"
        "Xen hard limit:     ",
-        K(current_pages), K(target_pages), K(balloon_low), K(balloon_high));
+        PAGES2KB(current_pages), PAGES2KB(target_pages),
+        PAGES2KB(balloon_low), PAGES2KB(balloon_high));
 
     if ( hard_limit != ~0UL )
         len += sprintf(
             page + len,
             "%8lu kB (inc. %8lu kB driver headroom)\n",
-            K(hard_limit), K(driver_pages));
+            PAGES2KB(hard_limit), PAGES2KB(driver_pages));
     else
         len += sprintf(
             page + len,
@@ -396,9 +477,7 @@
 
     balloon_pde->read_proc  = balloon_read;
     balloon_pde->write_proc = balloon_write;
-
-    (void)ctrl_if_register_receiver(CMSG_MEM_REQUEST, balloon_ctrlif_rx, 0);
-
+
     /* Initialise the balloon with excess memory space. */
     for ( pfn = xen_start_info.nr_pages; pfn < max_pfn; pfn++ )
     {
@@ -406,6 +485,11 @@
         page = &mem_map[pfn];
         if ( !PageReserved(page) )
             balloon_append(page);
     }
+
+    xb_watch.callback = watch_target;
+    root_watch.callback = watch_target;
+
+    balloon_init_watcher();
     return 0;
 }
diff -r 0db6a59abb95 -r b3ead8c9affb linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_probe.c
--- a/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_probe.c  Mon Aug  1 10:48:24 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_probe.c  Mon Aug  1 17:03:55 2005
@@ -309,6 +309,9 @@
 		return err;
 	}
 
+	/* Initialize non-xenbus drivers */
+	balloon_init_watcher();
+
 	down(&xenbus_lock);
 	/* Enumerate devices in xenstore. */
 	xenbus_probe_devices("device");
diff -r 0db6a59abb95 -r b3ead8c9affb tools/python/xen/xend/XendDomainInfo.py
--- a/tools/python/xen/xend/XendDomainInfo.py  Mon Aug  1 10:48:24 2005
+++ b/tools/python/xen/xend/XendDomainInfo.py  Mon Aug  1 17:03:55 2005
@@ -152,6 +152,9 @@
         vm = cls(db)
         vm.construct(config)
         vm.saveToDB(sync=True)
+        # Flush info to xenstore immediately
+        vm.exportToDB()
+
         return vm

     create = classmethod(create)
@@ -172,6 +175,7 @@
         log.debug('config=' + prettyprintstring(config))

         vm.memory = info['mem_kb']/1024
+        vm.target = info['mem_kb'] * 1024

         if config:
             try:
@@ -222,6 +226,7 @@
         DBVar('restart_state', ty='str'),
         DBVar('restart_time',  ty='float'),
         DBVar('restart_count', ty='int'),
+        DBVar('target',        ty='long', path="memory/target"),
         ]

     def __init__(self, db):
@@ -239,6 +244,8 @@
         self.memory = None
         self.ssidref = None
         self.image = None
+
+        self.target = None

         self.channel = None
         self.store_channel = None
@@ -315,6 +322,7 @@
         self.info = info
         self.memory = self.info['mem_kb'] / 1024
         self.ssidref = self.info['ssidref']
+        self.target = self.info['mem_kb'] * 1024

     def state_set(self, state):
         self.state_updated.acquire()
@@ -399,7 +407,8 @@
                 ['id', self.id],
                 ['name', self.name],
                 ['memory', self.memory],
-                ['ssidref', self.ssidref] ]
+                ['ssidref', self.ssidref],
+                ['target', self.target] ]
         if self.uuid:
             sxpr.append(['uuid', self.uuid])
         if self.info:
@@ -536,6 +545,7 @@
         self.memory = int(sxp.child_value(config, 'memory'))
         if self.memory is None:
             raise VmError('missing memory size')
+        self.target = self.memory * (1 << 20)
         self.ssidref = int(sxp.child_value(config, 'ssidref'))
         cpu = sxp.child_value(config, 'cpu')
         if self.recreate and self.id and cpu is not None and int(cpu) >= 0:
@@ -947,11 +957,12 @@
             index[field_name] = field_index + 1

     def mem_target_set(self, target):
-        """Set domain memory target in pages.
-        """
-        if self.channel:
-            msg = messages.packMsg('mem_request_t', { 'target' : target * (1 << 8)} )
-            self.channel.writeRequest(msg)
+        """Set domain memory target in bytes.
+        """
+        if target:
+            self.target = target * (1 << 20)
+            # Commit to XenStore immediately
+            self.exportToDB()

     def vcpu_hotplug(self, vcpu, state):
         """Disable or enable VCPU in domain.

-- 
Dan Smith
IBM Linux Technology Center
Open Hypervisor Team
email: danms@xxxxxxxxxx

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel