[XEN][PATCH v11 02/20] common/device_tree.c: unflatten_device_tree() propagate errors
This will be useful for dynamic node programming, where new dt nodes are
unflattened at runtime. Errors caused by invalid device tree nodes should be
propagated back to the caller.

Signed-off-by: Vikram Garhwal <vikram.garhwal@xxxxxxx>
Reviewed-by: Michal Orzel <michal.orzel@xxxxxxx>
Acked-by: Stefano Stabellini <sstabellini@xxxxxxxxxx>
---
Changes from v9:
    Replace __be64 with void.

Changes from v7:
    Free allocated memory in case of errors when calling unflatten_dt_node.
---
 xen/common/device_tree.c | 17 +++++++++++++++--
 1 file changed, 15 insertions(+), 2 deletions(-)

diff --git a/xen/common/device_tree.c b/xen/common/device_tree.c
index 7c6b41c3b4..b6d9f018c6 100644
--- a/xen/common/device_tree.c
+++ b/xen/common/device_tree.c
@@ -2110,6 +2110,9 @@ static int __init __unflatten_device_tree(const void *fdt,
     /* First pass, scan for size */
     start = ((unsigned long)fdt) + fdt_off_dt_struct(fdt);
     size = unflatten_dt_node(fdt, 0, &start, NULL, NULL, 0);
+    if ( !size )
+        return -EINVAL;
+
     size = (size | 3) + 1;
 
     dt_dprintk("  size is %#lx allocating...\n", size);
@@ -2127,11 +2130,21 @@ static int __init __unflatten_device_tree(const void *fdt,
     start = ((unsigned long)fdt) + fdt_off_dt_struct(fdt);
     unflatten_dt_node(fdt, mem, &start, NULL, &allnextp, 0);
     if ( be32_to_cpup((__be32 *)start) != FDT_END )
-        printk(XENLOG_WARNING "Weird tag at end of tree: %08x\n",
+    {
+        printk(XENLOG_ERR "Weird tag at end of tree: %08x\n",
                *((u32 *)start));
+        xfree((void *)mem);
+        return -EINVAL;
+    }
+
     if ( be32_to_cpu(((__be32 *)mem)[size / 4]) != 0xdeadbeefU )
-        printk(XENLOG_WARNING "End of tree marker overwritten: %08x\n",
+    {
+        printk(XENLOG_ERR "End of tree marker overwritten: %08x\n",
                be32_to_cpu(((__be32 *)mem)[size / 4]));
+        xfree((void *)mem);
+        return -EINVAL;
+    }
+
     *allnextp = NULL;
 
     dt_dprintk(" <- unflatten_device_tree()\n");
-- 
2.17.1
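
For illustration, a minimal sketch of how a caller could consume the error now
propagated by __unflatten_device_tree(). The wrapper apply_overlay_fdt() is
hypothetical, and the two-argument __unflatten_device_tree(fdt, &root)
signature is assumed for the example rather than taken from this patch:

/*
 * Hypothetical caller, for illustration only. With this patch a
 * malformed FDT makes __unflatten_device_tree() return -EINVAL
 * (after freeing its allocation) instead of merely warning.
 */
static int apply_overlay_fdt(const void *overlay_fdt)
{
    struct dt_device_node *root = NULL;
    int rc;

    rc = __unflatten_device_tree(overlay_fdt, &root);
    if ( rc )
    {
        /* The invalid-tree error now reaches the caller; fail cleanly. */
        printk(XENLOG_ERR "Unflattening overlay FDT failed: %d\n", rc);
        return rc;
    }

    /* ... attach the new nodes to the host device tree ... */
    return 0;
}

The point of the sketch: dynamic node programming code can now distinguish a
malformed FDT from success, instead of continuing with a partially built tree.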