[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Xen-devel] [RFC v1 4/8] x86/init: add linker table support



> diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
> index 137dfa96aa14..e66b030e82bc 100644
> --- a/arch/x86/Kconfig.debug
> +++ b/arch/x86/Kconfig.debug
> @@ -400,4 +400,51 @@ config PUNIT_ATOM_DEBUG
>         The current power state can be read from
>         /sys/kernel/debug/punit_atom/dev_power_state
>  
> +config X86_DEBUG_LINKER_TABLES
> +     bool "x86 linker table debug"
> +     depends on DEBUG_KERNEL
> +     default n
> +     ---help---
> +       Enabling this should enable debugging linker tables on x86
> +       with its own solution. To help debug it enables the "x86 beta"

its own solution? Would it be good to just name what that solution is?

> +       and "x86 gamma" built-in feature. Both "x86 alpha" and "x86 delta"
> +       built-in features are left disabled. You should see all tables
> +       compiled, except delta, only the beta and gamma feature will be
> +       linked in. The delta feature will only be compiled and linked in
> +       if and only if its enabled the old way.

Whoa. Beta Alpha Delta Gamma? You lost me :-)

> +
> +       For more details on these linker table refer to:
> +
> +       include/linux/tables.h
> +
> +       If unsure, say N.
> +
> +config X86_DEBUG_LINKER_TABLE_ALPHA
> +     bool "x86 linker table alpha"
> +     default n
> +     depends on X86_DEBUG_LINKER_TABLES
> +     ---help---
> +       Enabling this should enable the linker table "x86 alpha" feature.
> +
> +config X86_DEBUG_LINKER_TABLE_BETA
> +     bool "x86 linker table beta"
> +     default y
> +     depends on X86_DEBUG_LINKER_TABLES
> +     ---help---
> +       Enabling this should enable the linker table "x86 beta" feature.
> +
> +config X86_DEBUG_LINKER_TABLE_GAMMA
> +     bool "x86 linker table gamma"
> +     default y
> +     depends on X86_DEBUG_LINKER_TABLES
> +     ---help---
> +       Enabling this should enable the linker table "x86 gamma" feature.

Which has nothing to with gamma radiation right :-)

> +
> +config X86_DEBUG_LINKER_TABLE_DELTA
> +     bool "x86 linker table delta"
> +     default n
> +     depends on X86_DEBUG_LINKER_TABLES
> +     ---help---
> +       Enabling this should enable the linker table "x86 delta" feature.
> +
>  endmenu
> diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
> index cd0fc0cc78bc..b1b941e9ccd7 100644
> --- a/arch/x86/include/asm/x86_init.h
> +++ b/arch/x86/include/asm/x86_init.h
> +/**
> + * enum x86_init_fn_flags: flags for init sequences
> + *
> + * X86_INIT_FINISH_IF_DETECTED: tells the core that once this init sequence
> + *   has completed it can break out of the loop for init sequences on
> + *   its own level.
> + * X86_INIT_DETECTED: private flag. Used by the x86 core to annotate that 
> this
> + *   init sequence has been detected and it all of its callbacks
> + *   must be run during initialization.
> + */
> +enum x86_init_fn_flags {

Should you have a 'default' one? That is, a default one?
X86_INIT_IGNORE = 0,

> +     X86_INIT_FINISH_IF_DETECTED = BIT(0),
> +     X86_INIT_DETECTED = BIT(1),
> +};
> +
> +/* The x86 initialisation function table */
> +#define X86_INIT_FNS __table(struct x86_init_fn, "x86_init_fns")
> +
> +/* Used to declares an x86 initialization table */

s/declares/declare/
Also missing a full stop.

> +#define __x86_init_fn(order_level) __table_entry(X86_INIT_FNS, order_level)
> +
> +/* Init order levels, we can start at 01 but reserve 01-09 for now */
> +#define X86_INIT_ORDER_EARLY 10
> +#define X86_INIT_ORDER_NORMAL        30
> +#define X86_INIT_ORDER_LATE  50

Something is odd with those tabs/spaces.

> +
> +/*
> + * Use LTO_REFERENCE_INITCALL just in case of issues with old versions of 
> gcc.
> + * This might not be needed for linker tables due to how we compartamentalize

s/compartamentalize/compartmentalize/

> + * sections and then order them at linker time, but just in case.
> + */
> +
> +#define x86_init(__level,                                            \
> +              __supp_hardware_subarch,                               \
> +              __detect,                                              \
> +              __depend,                                              \
> +              __early_init)                                          \
> +     static struct x86_init_fn __x86_init_fn_##__early_init __used   \
> +             __x86_init_fn(__level) = {                              \
> +             .order_level = __level,                                 \
> +             .supp_hardware_subarch = __supp_hardware_subarch,       \
> +             .detect = __detect,                                     \
> +             .depend = __depend,                                     \
> +             .early_init = __early_init,                             \

.flags = 0, perhaps? Or X86_INIT_IGNORE?

> +     };                                                              \
> +     LTO_REFERENCE_INITCALL(__x86_init_fn_##__early_init);
> +
> +#define x86_init_early(__supp_hardware_subarch,                              
> \

The first \ is at an odd column.

> +                    __detect,                                        \
> +                    __depend,                                        \
> +                    __early_init)                                    \
> +     x86_init(X86_INIT_ORDER_EARLY, __supp_hardware_subarch,         \
> +              __detect, __depend,                                    \
> +              __early_init);
> +
> +#define x86_init_normal(__supp_hardware_subarch,                     \
> +                    __detect,                                        \
> +                    __depend,                                        \
> +                    __early_init)                                    \
> +     x86_init(__name, X86_INIT_ORDER_NORMAL, __supp_hardware_subarch,\
> +              __detect, __depend,                                    \
> +              __early_init);
> +
> +#define x86_init_early_all(__detect,                                 \
> +                        __depend,                                    \
> +                        __early_init)                                \
> +     x86_init_early(__name, X86_SUBARCH_ALL_SUBARCHS,                \
> +                    __detect, __depend,                              \
> +                    __early_init);
> +
> +#define x86_init_early_pc(__detect,                                  \
> +                       __depend,                                     \
> +                       __early_init)                                 \
> +     x86_init_early(BIT(X86_SUBARCH_PC),                             \
> +                    __detect, __depend,                              \
> +                    __early_init);
> +
> +#define x86_init_early_pc_simple(__early_init)                               
> \

Ditto
> +     x86_init_early((BIT(X86_SUBARCH_PC)), NULL, NULL,               \
> +                    __early_init);
> +
> +#define x86_init_normal_all(__detect,                                        
> \

Ditto
> +                         __depend,                                   \
> +                         __early_init)                               \
> +     x86_init_normal(X86_SUBARCH_ALL_SUBARCHS,                       \
> +                     __detect, __depend,                             \
> +                     __early_init);
> +
> +#define x86_init_normal_pc(__detect,                                 \
> +                        __depend,                                    \
> +                        __early_init)                                \
> +     x86_init_normal((BIT(X86_SUBARCH_PC)),                          \
> +                     __detect, __depend,                             \
> +                     __early_init);
> +
> +
> +#define x86_init_normal_xen(__detect,                                        
> \

Ditto
> +                         __depend,                                   \
> +                         __early_init)                               \
> +     x86_init_normal((BIT(X86_SUBARCH_XEN)),                         \
> +                     __detect, __depend,                             \
> +                     __early_init);
> +
> +/**
> + * x86_init_fn_early_init: call all early_init() callbacks
> + *
> + * This calls all early_init() callbacks on the x86_init_fns linker table.
> + */
> +void x86_init_fn_early_init(void);
> +
> +/**
> + * x86_init_fn_init_tables - sort and check x86 linker table
> + *
> + * This sorts struct x86_init_fn init sequences in the x86_init_fns linker
> + * table by ensuring that init sequences that depend on other init sequences
> + * are placed later in the linker table. Init sequences that do not have
> + * dependencies are left in place. Circular dependencies are not allowed.
> + * The order-level of subordinate init sequences, that is of init sequences
> + * that depend on other init sequences, must have an order-level of lower
> + * or equal priority to the init sequence it depends on.
> + *
> + * This also validates semantics of all struct x86_init_fn init sequences
> + * on the x86_init_fns linker table.
> + */
> +void x86_init_fn_init_tables(void);
> +
> +#endif /* __X86_INIT_TABLES_H */
> diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
> index b1b78ffe01d0..be167a0a5e2c 100644
> --- a/arch/x86/kernel/Makefile
> +++ b/arch/x86/kernel/Makefile
> @@ -23,6 +23,8 @@ KASAN_SANITIZE_dumpstack_$(BITS).o := n
>  CFLAGS_irq.o := -I$(src)/../include/asm/trace
>  
>  obj-y                        := process_$(BITS).o signal.o
> +obj-y                        += init.o sort-init.o
> +obj-$(CONFIG_X86_DEBUG_LINKER_TABLES) += dbg-tables/
>  obj-$(CONFIG_COMPAT) += signal_compat.o
>  obj-y                        += traps.o irq.o irq_$(BITS).o 
> dumpstack_$(BITS).o
>  obj-y                        += time.o ioport.o dumpstack.o nmi.o
> diff --git a/arch/x86/kernel/dbg-tables/Makefile 
> b/arch/x86/kernel/dbg-tables/Makefile
> new file mode 100644
> index 000000000000..02d12c502ad0
> --- /dev/null
> +++ b/arch/x86/kernel/dbg-tables/Makefile
> @@ -0,0 +1,18 @@
> +# You should see all these compile but at run time you'd
> +# only see the ones that were linked in.
> +
> +# This ensures we always compile but only link
> +# in alpha if it was enabled. This is typically what
> +# you are expected to use to avoid code bit-rot.
> +table-$(CONFIG_X86_DEBUG_LINKER_TABLE_ALPHA) += alpha.o
> +
> +# This accomplishes the same, but requires 2 lines.
> +extra-y                                              += beta.o
> +obj-$(CONFIG_X86_DEBUG_LINKER_TABLE_BETA)    += beta.o
> +
> +table-$(CONFIG_X86_DEBUG_LINKER_TABLE_GAMMA) += gamma.o
> +
> +# If you *know* you only want to enable compilation of
> +# a feature when its selected you can just use the good
> +# ol' obj
> +obj-$(CONFIG_X86_DEBUG_LINKER_TABLE_DELTA)   += delta.o
> diff --git a/arch/x86/kernel/dbg-tables/alpha.c 
> b/arch/x86/kernel/dbg-tables/alpha.c
> new file mode 100644
> index 000000000000..54754f893a08
> --- /dev/null
> +++ b/arch/x86/kernel/dbg-tables/alpha.c
> @@ -0,0 +1,10 @@
> +#define pr_fmt(fmt) "debug-alpha: " fmt
> +
> +#include <linux/kernel.h>
> +#include <asm/x86_init.h>
> +
> +static void early_init_dbg_alpha(void) {
> +     pr_info("early_init triggered\n");
> +}
> +
> +x86_init_early_pc_simple(early_init_dbg_alpha);
> diff --git a/arch/x86/kernel/dbg-tables/beta.c 
> b/arch/x86/kernel/dbg-tables/beta.c
> new file mode 100644
> index 000000000000..7384a57fc386
> --- /dev/null
> +++ b/arch/x86/kernel/dbg-tables/beta.c
> @@ -0,0 +1,18 @@
> +#define pr_fmt(fmt) "debug-beta: " fmt
> +
> +#include <linux/kernel.h>
> +#include <asm/x86_init.h>
> +
> +#include "gamma.h"
> +
> +static bool x86_dbg_detect_beta(void)
> +{
> +     return true;
> +}
> +
> +static void early_init_dbg_beta(void) {
> +     pr_info("early_init triggered\n");
> +}
> +x86_init_early_pc(x86_dbg_detect_beta,
> +               x86_dbg_detect_gamma,
> +               early_init_dbg_beta);
> diff --git a/arch/x86/kernel/dbg-tables/delta.c 
> b/arch/x86/kernel/dbg-tables/delta.c
> new file mode 100644
> index 000000000000..9d38c68e602a
> --- /dev/null
> +++ b/arch/x86/kernel/dbg-tables/delta.c
> @@ -0,0 +1,10 @@
> +#define pr_fmt(fmt) "debug-delta: " fmt
> +
> +#include <linux/kernel.h>
> +#include <asm/x86_init.h>
> +
> +static void early_init_dbg_delta(void) {
> +     pr_info("early_init triggered\n");
> +}
> +
> +x86_init_early_pc_simple(early_init_dbg_delta);
> diff --git a/arch/x86/kernel/dbg-tables/gamma.c 
> b/arch/x86/kernel/dbg-tables/gamma.c
> new file mode 100644
> index 000000000000..7b663c1f08f4
> --- /dev/null
> +++ b/arch/x86/kernel/dbg-tables/gamma.c
> @@ -0,0 +1,18 @@
> +#define pr_fmt(fmt) "debug-gamma: " fmt
> +
> +#include <linux/kernel.h>
> +#include <asm/x86_init.h>
> +
> +bool x86_dbg_detect_gamma(void)
> +{
> +     return true;
> +}
> +
> +static void early_init_dbg_gamma(void)
> +{
> +     pr_info("early_init triggered\n");
> +}
> +
> +x86_init_early_pc(x86_dbg_detect_gamma,
> +               NULL,
> +               early_init_dbg_gamma);
> diff --git a/arch/x86/kernel/dbg-tables/gamma.h 
> b/arch/x86/kernel/dbg-tables/gamma.h
> new file mode 100644
> index 000000000000..810d450ddd14
> --- /dev/null
> +++ b/arch/x86/kernel/dbg-tables/gamma.h
> @@ -0,0 +1,3 @@
> +#include <asm/x86_init.h>
> +
> +bool x86_dbg_detect_gamma(void);

OK, now that makes sense. I would split the debug functionality
into a separate patch. And update the description in the Kconfig
to say what you would expect.

> diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
> index 2911ef3a9f1c..d93f3e42e61b 100644
> --- a/arch/x86/kernel/head32.c
> +++ b/arch/x86/kernel/head32.c
> @@ -19,6 +19,7 @@
>  #include <asm/bios_ebda.h>
>  #include <asm/tlbflush.h>
>  #include <asm/bootparam_utils.h>
> +#include <asm/x86_init.h>
>  
>  static void __init i386_default_early_setup(void)
>  {
> @@ -47,5 +48,8 @@ asmlinkage __visible void __init i386_start_kernel(void)
>               break;
>       }
>  
> +     x86_init_fn_init_tables();
> +     x86_init_fn_early_init();
> +
>       start_kernel();
>  }
> diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
> index f129a9af6357..f83263a8d0ed 100644
> --- a/arch/x86/kernel/head64.c
> +++ b/arch/x86/kernel/head64.c
> @@ -28,6 +28,7 @@
>  #include <asm/bootparam_utils.h>
>  #include <asm/microcode.h>
>  #include <asm/kasan.h>
> +#include <asm/x86_init.h>
>  
>  /*
>   * Manage page tables very early on.
> @@ -190,6 +191,9 @@ void __init x86_64_start_reservations(char 
> *real_mode_data)
>       if (!boot_params.hdr.version)
>               copy_bootdata(__va(real_mode_data));
>  
> +     x86_init_fn_init_tables();
> +     x86_init_fn_early_init();
> +
>       reserve_ebda_region();
>  
>       start_kernel();
> diff --git a/arch/x86/kernel/init.c b/arch/x86/kernel/init.c
> new file mode 100644
> index 000000000000..5f9b662e2b34
> --- /dev/null
> +++ b/arch/x86/kernel/init.c
> @@ -0,0 +1,55 @@
> +#define pr_fmt(fmt) "x86-init: " fmt
> +
> +#include <linux/bug.h>
> +#include <linux/kernel.h>
> +
> +#include <asm/x86_init_fn.h>
> +#include <asm/bootparam.h>
> +#include <asm/boot.h>
> +#include <asm/setup.h>
> +
> +extern struct x86_init_fn __tbl_x86_start_init_fns[], 
> __tbl_x86_end_init_fns[];
> +
> +static bool x86_init_fn_supports_subarch(struct x86_init_fn *fn)
> +{
> +     if (!fn->supp_hardware_subarch) {
> +             pr_err("Init sequence fails to declares any supported subarchs: 
> %pF\n", fn->early_init);
> +             WARN_ON(1);
> +     }
> +     if (BIT(boot_params.hdr.hardware_subarch) & fn->supp_hardware_subarch)
> +             return true;
> +     return false;
> +}
> +
> +void x86_init_fn_early_init(void)
> +{
> +     int ret;
> +     struct x86_init_fn *init_fn;
> +     unsigned int num_inits = table_num_entries(X86_INIT_FNS);
> +
> +     if (!num_inits)
> +             return;
> +
> +     pr_debug("Number of init entries: %d\n", num_inits);
> +
> +     for_each_table_entry(init_fn, X86_INIT_FNS) {
> +             if (!x86_init_fn_supports_subarch(init_fn))
> +                     continue;
> +             if (!init_fn->detect)
> +                     init_fn->flags |= X86_INIT_DETECTED;
> +             else {
> +                     ret = init_fn->detect();
> +                     if (ret > 0)
> +                             init_fn->flags |= X86_INIT_DETECTED;
> +             }
> +
> +             if (init_fn->flags & X86_INIT_DETECTED) {
> +                     init_fn->flags |= X86_INIT_DETECTED;

Huh? It is already set.
> +                     pr_debug("Running early init %pF ...\n", 
> init_fn->early_init);
> +                     init_fn->early_init();
> +                     pr_debug("Completed early init %pF\n", 
> init_fn->early_init);
> +                     if (init_fn->flags & X86_INIT_FINISH_IF_DETECTED)
> +                             break;
> +             }
> +     }
> +}
> diff --git a/arch/x86/kernel/sort-init.c b/arch/x86/kernel/sort-init.c
> new file mode 100644
> index 000000000000..f230f3ddb0aa
> --- /dev/null
> +++ b/arch/x86/kernel/sort-init.c
> @@ -0,0 +1,114 @@
> +#include <linux/types.h>
> +#include <linux/kernel.h>
> +#include <linux/string.h>
> +#include <asm/x86_init_fn.h>
> +
> +extern struct x86_init_fn __tbl_x86_start_init_fns[], 
> __tbl_x86_end_init_fns[];
> +
> +static struct x86_init_fn *x86_init_fn_find_dep(struct x86_init_fn *start,
> +                                             struct x86_init_fn *finish,
> +                                             struct x86_init_fn *q)
> +{
> +     struct x86_init_fn *p;
> +
> +     if (!q)
> +             return NULL;
> +
> +     for (p = start; p < finish; p++)
> +             if (p->detect == q->depend)
> +                     return p;
> +
> +     return NULL;
> +}
> +
> +static void x86_init_fn_sort(struct x86_init_fn *start,
> +                          struct x86_init_fn *finish)
> +{
> +
> +     struct x86_init_fn *p, *q, tmp;
> +
> +     for (p = start; p < finish; p++) {
> +again:
> +             q = x86_init_fn_find_dep(start, finish, p);
> +             /*
> +              * We are bit sneaky here. We use the memory address to figure
> +              * out if the node we depend on is past our point, if so, swap.
> +              */
> +             if (q > p) {
> +                     tmp = *p;
> +                     memmove(p, q, sizeof(*p));
> +                     *q = tmp;
> +                     goto again;
> +             }
> +     }
> +
> +}
> +
> +static void x86_init_fn_check(struct x86_init_fn *start,
> +                           struct x86_init_fn *finish)
> +{
> +     struct x86_init_fn *p, *q, *x;
> +
> +     /* Simple cyclic dependency checker. */
> +     for (p = start; p < finish; p++) {
> +             if (!p->depend)
> +                     continue;
> +             q = x86_init_fn_find_dep(start, finish, p);
> +             x = x86_init_fn_find_dep(start, finish, q);
> +             if (p == x) {
> +                     pr_info("CYCLIC DEPENDENCY FOUND! %pF depends on %pF 
> and vice-versa. BREAKING IT.\n",
> +                             p->early_init, q->early_init);
> +                     /* Heavy handed way..*/
> +                     x->depend = 0;
> +             }
> +     }
> +
> +     /*
> +      * Validate sorting semantics.
> +      *
> +      * p depends on q so:
> +      *      - q must run first, so q < p. If q > p that's an issue
> +      *        as its saying p must run prior to q. We already sorted
> +      *        this table, this is a problem.
> +      *
> +      *      - q's order level must be <= than p's as it should run first
> +      */
> +     for (p = start; p < finish; p++) {
> +             if (!p->depend)
> +                     continue;
> +             /*
> +              * Be pedantic and do a full search on the entire table,
> +              * if we need further validation, after this is called
> +              * one could use an optimized version which just searches
> +              * on x86_init_fn_find_dep(p, finish, p), as we would have
> +              * guarantee on proper ordering both at the dependency level
> +              * and by order level.
> +              */
> +             q = x86_init_fn_find_dep(start, finish, p);
> +             if (q && q > p) {
> +                     pr_info("EXECUTION ORDER INVALID! %pF should be called 
> before %pF!\n",
> +                             p->early_init, q->early_init);
> +             }
> +
> +             /*
> +              * Technically this would still work as the memmove() would
> +              * have forced the dependency to run first, however we want
> +              * strong semantics, so lets avoid these.
> +              */
> +             if (q && q->order_level > p->order_level) {
> +                     pr_info("INVALID ORDER LEVEL! %pF should have an order 
> level <= be called before %pF!\n",
> +                             p->early_init, q->early_init);
> +             }
> +     }
> +}
> +
> +void x86_init_fn_init_tables(void)
> +{
> +     unsigned int num_inits = table_num_entries(X86_INIT_FNS);
> +
> +     if (!num_inits)
> +             return;
> +
> +     x86_init_fn_sort(__tbl_x86_start_init_fns, __tbl_x86_end_init_fns);
> +     x86_init_fn_check(__tbl_x86_start_init_fns, __tbl_x86_end_init_fns);
> +}
> diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
> index 74e4bf11f562..75c8f3f80e73 100644
> --- a/arch/x86/kernel/vmlinux.lds.S
> +++ b/arch/x86/kernel/vmlinux.lds.S
> @@ -265,6 +265,31 @@ SECTIONS
>               __apicdrivers_end = .;
>       }
>  
> +     /*
> +      * x86 linker tables: this will sort all order levels
> +      * lexicographically. Something like SORT_BY_INIT_PRIORITY()
> +      * which sorts specifically on numeric order is desirable but
> +      * that would impose a change on binutils to generalize
> +      * SORT_BY_INIT_PRIORITY() for any section, SORT_BY_INIT_PRIORITY()
> +      * is specific to the init sections, and that would also mean having
> +      * a bump on requirments for building Linux. Using SORT() should
> +      * suffice, * its what gpxe uses. For things in the same order
> +      * level we rely on order in the C file, and also order on the
> +      * Makefile. Since this solution also embraces the IOMMU init solution
> +      * it has further sorting semantics implemented in C and its semantics
> +      * are defined in x86_init_fn.h and implemented in sort-init.c.
> +      */
> +     .tbl            : {
> +             __tbl_x86_start_init_fns = .;
> +             *(SORT(.tbl.x86_init_fns.*))
> +             __tbl_x86_end_init_fns = .;
> +
> +             /* ... etc ... */
> +
> +             /* You would add other tables here as needed */
> +     }
> +  . = ALIGN(8);
> +

There is an extra `. = ALIGN(8)` there?

>       . = ALIGN(8);
>       /*
>        * .exit.text is discard at runtime, not link time, to deal with
> diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
> index 0c2fae8d929d..22b1616de37c 100644
> --- a/arch/x86/tools/relocs.c
> +++ b/arch/x86/tools/relocs.c
> @@ -56,6 +56,7 @@ static const char * const sym_regex_kernel[S_NSYMTYPES] = {
>       "__x86_cpu_dev_(start|end)|"
>       "(__parainstructions|__alt_instructions)(|_end)|"
>       "(__iommu_table|__apicdrivers|__smp_locks)(|_end)|"
> +     "(__tbl_x86_(start|end)_init_fns|"
>       "__(start|end)_pci_.*|"
>       "__(start|end)_builtin_fw|"
>       "__(start|stop)___ksymtab(|_gpl|_unused|_unused_gpl|_gpl_future)|"
> -- 
> 2.6.2
> 

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.