Re: [Xen-devel] [PATCH v3 06/23] xsplice: Implement payload loading (v4)
On 12/02/16 18:05, Konrad Rzeszutek Wilk wrote:
I will refrain from repeating the same review from previous patches.
> +int xsplice_verify_elf(struct xsplice_elf *elf, uint8_t *data)
> +{
> +
> +    Elf_Ehdr *hdr = (Elf_Ehdr *)data;
> +
> +    if ( elf->len < (sizeof *hdr) ||
> +         !IS_ELF(*hdr) ||
At the very least, this should return -EINVAL for "not an ELF", to
differentiate it from "not the right kind of ELF" below (see the sketch
after the quoted function).
> +         hdr->e_ident[EI_CLASS] != ELFCLASS64 ||
> +         hdr->e_ident[EI_DATA] != ELFDATA2LSB ||
> +         hdr->e_ident[EI_OSABI] != ELFOSABI_SYSV ||
> +         hdr->e_machine != EM_X86_64 ||
> +         hdr->e_type != ET_REL ||
> +         hdr->e_phnum != 0 )
> +    {
> +        printk(XENLOG_ERR "%s: Invalid ELF file.\n", elf->name);
Where possible, please avoid punctuation in error messages. It's just
wasted characters on the UART.
I would also suggest the error message be "xpatch '%s': Invalid ELF
file\n" to give the observer some clue that we are referring to the
payload attached to a specific xsplice patch.
> +        return -EOPNOTSUPP;
> +    }
> +
> +    return 0;
> +}
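As a rough (untested) sketch of the split, with the message text purely
illustrative:

    /* Not an ELF file at all. */
    if ( elf->len < (sizeof *hdr) || !IS_ELF(*hdr) )
    {
        printk(XENLOG_ERR "xpatch '%s': Not an ELF file\n", elf->name);
        return -EINVAL;
    }

    /* An ELF file, but not one we can consume. */
    if ( hdr->e_ident[EI_CLASS] != ELFCLASS64 ||
         hdr->e_ident[EI_DATA] != ELFDATA2LSB ||
         hdr->e_ident[EI_OSABI] != ELFOSABI_SYSV ||
         hdr->e_machine != EM_X86_64 ||
         hdr->e_type != ET_REL ||
         hdr->e_phnum != 0 )
    {
        printk(XENLOG_ERR "xpatch '%s': Unsupported ELF file\n", elf->name);
        return -EOPNOTSUPP;
    }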
> +
> +int xsplice_perform_rel(struct xsplice_elf *elf,
> +                        struct xsplice_elf_sec *base,
> +                        struct xsplice_elf_sec *rela)
> +{
> +    printk(XENLOG_ERR "%s: SHR_REL relocation unsupported\n", elf->name);
Similarly here. All the error messages should have some common
indication that we are in the xsplice subsystem.
> +    return -ENOSYS;
> +}
> +
> <snip>
> @@ -378,6 +391,232 @@ int xsplice_control(xen_sysctl_xsplice_op_t *xsplice)
> return rc;
> }
>
> +#ifdef CONFIG_X86
> +static void find_hole(ssize_t pages, unsigned long *hole_start,
> +                      unsigned long *hole_end)
Find a hole in what?
Also, shouldn't this code live in arch/x86/xsplice rather than a
CONFIG_X86 section of common xsplice?
> +{
> +    struct payload *data, *data2;
> +
> +    spin_lock(&payload_lock);
> +    list_for_each_entry ( data, &payload_list, list )
> +    {
> +        list_for_each_entry ( data2, &payload_list, list )
> +        {
> +            unsigned long start, end;
> +
> +            start = (unsigned long)data2->payload_address;
> +            end = start + data2->payload_pages * PAGE_SIZE;
> +            if ( *hole_end > start && *hole_start < end )
> +            {
> +                *hole_start = end;
> +                *hole_end = *hole_start + pages * PAGE_SIZE;
> +                break;
> +            }
> +        }
> +        if ( &data2->list == &payload_list )
> +            break;
> +    }
> +    spin_unlock(&payload_lock);
> +}
> +
> +/*
> + * The following functions prepare an xSplice payload to be executed by
> + * allocating space, loading the allocated sections, resolving symbols,
> + * performing relocations, etc.
> + */
> +static void *alloc_payload(size_t size)
> +{
> +    mfn_t *mfn, *mfn_ptr;
> +    size_t pages, i;
> +    struct page_info *pg;
> +    unsigned long hole_start, hole_end, cur;
> +
> +    ASSERT(size);
> +
> +    /*
> +     * Copied from vmalloc which allocates pages and then maps them to an
> +     * arbitrary virtual address with PAGE_HYPERVISOR. We need specific
> +     * virtual address with PAGE_HYPERVISOR_RWX.
Can we therefore please extend the existing vmalloc infrastructure
rather than copying it?
Also, nack to introducing any new uses of RWX. It poses an unnecessary
security risk, and I am (slowly, sadly) trying to remove all uses of it
outside the __init code.
The pages should be mapped RW to start with, then relocated etc., and
then have the text section remapped to RX just before use.
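Roughly (a hand-wavy sketch only; text_start/text_size stand for
whatever the loader ends up tracking for the executable region, and this
assumes modify_xen_mappings() and PAGE_HYPERVISOR_RX are usable on the
payload mapping):

    /* Allocation comes back mapped PAGE_HYPERVISOR_RW. */
    buf = alloc_payload(size);

    /* ... copy sections, resolve symbols, apply relocations ... */

    /* Only once the payload is final, drop write and permit execute. */
    rc = modify_xen_mappings((unsigned long)text_start,
                             (unsigned long)text_start + text_size,
                             PAGE_HYPERVISOR_RX);
    if ( rc )
        return rc;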
> +     */
> +    pages = PFN_UP(size);
> +    mfn = xmalloc_array(mfn_t, pages);
> +    if ( mfn == NULL )
> +        return NULL;
> +
> <snip>
> +
> +static int move_payload(struct payload *payload, struct xsplice_elf *elf)
> +{
> +    uint8_t *buf;
> +    unsigned int i;
> +    size_t size = 0;
> +
> +    /* Compute text regions */
> +    for ( i = 0; i < elf->hdr->e_shnum; i++ )
> +    {
> +        if ( (elf->sec[i].sec->sh_flags & (SHF_ALLOC|SHF_EXECINSTR)) ==
> +             (SHF_ALLOC|SHF_EXECINSTR) )
> +            calc_section(&elf->sec[i], &size);
> +    }
> +
> +    /* Compute rw data */
> +    for ( i = 0; i < elf->hdr->e_shnum; i++ )
> +    {
> +        if ( (elf->sec[i].sec->sh_flags & SHF_ALLOC) &&
> +             !(elf->sec[i].sec->sh_flags & SHF_EXECINSTR) &&
> +             (elf->sec[i].sec->sh_flags & SHF_WRITE) )
> +            calc_section(&elf->sec[i], &size);
> +    }
> +
> +    /* Compute ro data */
> +    for ( i = 0; i < elf->hdr->e_shnum; i++ )
> +    {
> +        if ( (elf->sec[i].sec->sh_flags & SHF_ALLOC) &&
> +             !(elf->sec[i].sec->sh_flags & SHF_EXECINSTR) &&
> +             !(elf->sec[i].sec->sh_flags & SHF_WRITE) )
> +            calc_section(&elf->sec[i], &size);
> +    }
> +
> +    buf = alloc_payload(size);
> +    if ( !buf ) {
> +        printk(XENLOG_ERR "%s: Could not allocate memory for module\n",
> +               elf->name);
> +        return -ENOMEM;
> +    }
> +    memset(buf, 0, size);
alloc_payload() should ensure the 0-ness of buf. That way, you can get
clear pages "for free" by taking from a zeroed pool, rather than forcing
a rezero of a probably-zero buffer.
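i.e. something like this (sketch only; 'buf' stands for whatever virtual
address the function ends up returning):

    static void *alloc_payload(size_t size)
    {
        size_t pages = PFN_UP(size);
        void *buf;

        /* ... allocate and map 'pages' pages at 'buf' as before ... */

        /*
         * Clear here (or better, hand out pages which are already zeroed),
         * so callers such as move_payload() don't need to memset().
         */
        memset(buf, 0, pages * PAGE_SIZE);

        return buf;
    }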
> +
> +    for ( i = 0; i < elf->hdr->e_shnum; i++ )
> +    {
> +        if ( elf->sec[i].sec->sh_flags & SHF_ALLOC )
> +        {
> +            elf->sec[i].load_addr = buf + elf->sec[i].sec->sh_entsize;
> +            /* Don't copy NOBITS - such as BSS. */
> +            if ( elf->sec[i].sec->sh_type != SHT_NOBITS )
> +            {
> +                memcpy(elf->sec[i].load_addr, elf->sec[i].data,
> +                       elf->sec[i].sec->sh_size);
> +                printk(XENLOG_DEBUG "%s: Loaded %s at 0x%p\n",
> +                       elf->name, elf->sec[i].name, elf->sec[i].load_addr);
> +            }
> +        }
> +    }
> +
> +    payload->payload_address = buf;
> +    payload->payload_pages = PFN_UP(size);
> +
> +    return 0;
> +}
> +
> <snip>
> diff --git a/xen/include/xen/xsplice.h b/xen/include/xen/xsplice.h
> index cf465c4..d71c898 100644
> --- a/xen/include/xen/xsplice.h
> +++ b/xen/include/xen/xsplice.h
> @@ -1,10 +1,22 @@
> #ifndef __XEN_XSPLICE_H__
> #define __XEN_XSPLICE_H__
>
> +struct xsplice_elf;
> +struct xsplice_elf_sec;
> +struct xsplice_elf_sym;
> struct xen_sysctl_xsplice_op;
>
> #ifdef CONFIG_XSPLICE
> int xsplice_control(struct xen_sysctl_xsplice_op *);
> +
> +/* Arch hooks */
Can we name them arch_xsplice_$FOO then, to make it obvious?
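i.e., for the two hooks quoted earlier:

    int arch_xsplice_verify_elf(struct xsplice_elf *elf, uint8_t *data);
    int arch_xsplice_perform_rel(struct xsplice_elf *elf,
                                 struct xsplice_elf_sec *base,
                                 struct xsplice_elf_sec *rela);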
~Andrew
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel