[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Xen-devel] [PATCH 2 of 2] Lowmemd: Simple demo code to show use of VIRQ_ENOMEM



>>> On 28.02.12 at 22:56, Andres Lagar-Cavilla <andres@xxxxxxxxxxxxxxxx> wrote:

Thanks for contributing this.

> --- a/tools/misc/Makefile
> +++ b/tools/misc/Makefile
> @@ -9,7 +9,7 @@ CFLAGS += $(CFLAGS_xeninclude)
>  HDRS     = $(wildcard *.h)
>  
>  TARGETS-y := xenperf xenpm xen-tmem-list-parse gtraceview gtracestat 
> xenlockprof xenwatchdogd
> -TARGETS-$(CONFIG_X86) += xen-detect xen-hvmctx xen-hvmcrash
> +TARGETS-$(CONFIG_X86) += xen-detect xen-hvmctx xen-hvmcrash lowmemd

If this should get committed (which I'm in favor of at least as a first
step, to get more advanced over time if needed), we would certainly
want to prefix the binary with xen- (since that's what it's for, it's not
dealing with domain side low memory conditions).

Jan

>  TARGETS-$(CONFIG_MIGRATE) += xen-hptool
>  TARGETS := $(TARGETS-y)
>  
> @@ -21,7 +21,7 @@ INSTALL_BIN-y := xencons
>  INSTALL_BIN-$(CONFIG_X86) += xen-detect
>  INSTALL_BIN := $(INSTALL_BIN-y)
>  
> -INSTALL_SBIN-y := xm xen-bugtool xen-python-path xend xenperf xsview xenpm 
> xen-tmem-list-parse gtraceview gtracestat xenlockprof xenwatchdogd 
> xen-ringwatch
> +INSTALL_SBIN-y := xm xen-bugtool xen-python-path xend xenperf xsview xenpm 
> xen-tmem-list-parse gtraceview gtracestat xenlockprof xenwatchdogd 
> xen-ringwatch lowmemd
>  INSTALL_SBIN-$(CONFIG_X86) += xen-hvmctx xen-hvmcrash
>  INSTALL_SBIN-$(CONFIG_MIGRATE) += xen-hptool
>  INSTALL_SBIN := $(INSTALL_SBIN-y)
> @@ -70,6 +70,9 @@ xen-hptool: xen-hptool.o
>  xenwatchdogd: xenwatchdogd.o
>       $(CC) $(LDFLAGS) -o $@ $< $(LDLIBS_libxenctrl) $(APPEND_LDFLAGS)
>  
> +lowmemd: lowmemd.o
> +     $(CC) $(LDFLAGS) -o $@ $< $(LDLIBS_libxenctrl) $(LDLIBS_libxenstore) 
> $(APPEND_LDFLAGS)
> +
>  gtraceview: gtraceview.o
>       $(CC) $(LDFLAGS) -o $@ $< $(CURSES_LIBS) $(APPEND_LDFLAGS)
>  
> diff -r c44e9fe0ecca -r 4300b0d76575 tools/misc/lowmemd.c
> --- /dev/null
> +++ b/tools/misc/lowmemd.c
> @@ -0,0 +1,148 @@
> +/*
> + * lowmemd: demo VIRQ_ENOMEM
> + * Andres Lagar-Cavilla (GridCentric Inc.)
> + */
> +
> +#include <stdio.h>
> +#include <xenctrl.h>
> +#include <xs.h>
> +#include <stdlib.h>
> +#include <string.h>
> +
> +static evtchn_port_t virq_port      = -1;
> +static xc_evtchn *xce_handle        = NULL;
> +static xc_interface *xch            = NULL;
> +static struct xs_handle *xs_handle  = NULL;
> +
> +void cleanup(void)
> +{
> +    if (virq_port > -1)
> +        xc_evtchn_unbind(xce_handle, virq_port);
> +    if (xce_handle)
> +        xc_evtchn_close(xce_handle);
> +    if (xch)
> +        xc_interface_close(xch);
> +    if (xs_handle)
> +        xs_daemon_close(xs_handle);
> +}
> +
> +/* Never shrink dom0 below 1 GiB */
> +#define DOM0_FLOOR  (1 << 30)
> +#define DOM0_FLOOR_PG   ((DOM0_FLOOR) >> 12)
> +
> +/* Act if free memory is less than 92 MiB */
> +#define THRESHOLD   (92 << 20)
> +#define THRESHOLD_PG    ((THRESHOLD) >> 12)
> +
> +#define BUFSZ 512
> +void handle_low_mem(void)
> +{
> +    xc_dominfo_t  dom0_info;
> +    xc_physinfo_t info;
> +    unsigned long long free_pages, dom0_pages, diff, dom0_target;
> +    char data[BUFSZ], error[BUFSZ];
> +
> +    if (xc_physinfo(xch, &info) < 0)
> +    {
> +        perror("Getting physinfo failed");
> +        return;
> +    }
> +
> +    free_pages = (unsigned long long) info.free_pages;
> +    printf("Available free pages: 0x%llx:%llu\n",
> +            free_pages, free_pages);
> +
> +    /* Don't do anything if we have more than the threshold free */
> +    if ( free_pages >= THRESHOLD_PG )
> +        return;
> +    diff = THRESHOLD_PG - free_pages; 
> +
> +    if (xc_domain_getinfo(xch, 0, 1, &dom0_info) < 1)
> +    {
> +        perror("Failed to get dom0 info");
> +        return;
> +    }
> +
> +    dom0_pages = (unsigned long long) dom0_info.nr_pages;
> +    printf("Dom0 pages: 0x%llx:%llu\n", dom0_pages, dom0_pages);
> +    dom0_target = dom0_pages - diff;
> +    if (dom0_target <= DOM0_FLOOR_PG)
> +        return;
> +
> +    printf("Shooting for dom0 target 0x%llx:%llu\n", 
> +            dom0_target, dom0_target);
> +
> +    snprintf(data, BUFSZ, "%llu", dom0_target);
> +    if (!xs_write(xs_handle, XBT_NULL, 
> +            "/local/domain/0/memory/target", data, strlen(data)))
> +    {
> +        snprintf(error, BUFSZ,"Failed to write target %s to xenstore", 
> data);
> +        perror(error);
> +    }
> +}
> +
> +int main(int argc, char *argv[])
> +{
> +    int rc;
> +
> +    atexit(cleanup);
> +
> +     xch = xc_interface_open(NULL, NULL, 0);
> +     if (xch == NULL)
> +    {
> +        perror("Failed to open xc interface");
> +        return 1;
> +    }
> +
> +     xce_handle = xc_evtchn_open(NULL, 0);
> +     if (xce_handle == NULL)
> +    {
> +        perror("Failed to open evtchn device");
> +        return 2;
> +    }
> +
> +    xs_handle = xs_daemon_open();
> +    if (xs_handle == NULL)
> +    {
> +        perror("Failed to open xenstore connection");
> +        return 3;
> +    }
> +
> +     if ((rc = xc_evtchn_bind_virq(xce_handle, VIRQ_ENOMEM)) == -1)
> +    {
> +        perror("Failed to bind to domain exception virq port");
> +        return 4;
> +    }
> +
> +    virq_port = rc;
> +    
> +    while(1)
> +    {
> +        evtchn_port_t port;
> +
> +        if ((port = xc_evtchn_pending(xce_handle)) == -1)
> +        {
> +            perror("Failed to listen for pending event channel");
> +            return 5;
> +        }
> +
> +        if (port != virq_port)
> +        {
> +            char data[BUFSZ];
> +            snprintf(data, BUFSZ, "Wrong port, got %d expected %d", port, 
> virq_port);
> +            perror(data);
> +            return 6;
> +        }
> +
> +        if (xc_evtchn_unmask(xce_handle, port) == -1)
> +        {
> +            perror("Failed to unmask port");
> +            return 7;
> +        }
> +
> +        printf("Got a virq kick, time to get work\n");
> +        handle_low_mem();
> +    }
> +
> +    return 0;
> +}



_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.