Re: [Xen-devel] [PATCH v2 3/3] x86/ioreq server: Add HVMOP to map guest ram with p2m_ioreq_server to an ioreq server
On 31/03/16 11:53, Yu Zhang wrote:
> A new HVMOP - HVMOP_map_mem_type_to_ioreq_server - is added to
> let one ioreq server claim/disclaim its responsibility for the
> handling of guest pages with the p2m type p2m_ioreq_server. Users
> of this HVMOP can specify, in a parameter named flags, whether the
> ioreq server is supposed to handle write accesses, read accesses,
> or both. For now, we only support one ioreq server for this p2m
> type, so once an ioreq server has claimed ownership, subsequent
> calls of HVMOP_map_mem_type_to_ioreq_server will fail. Users can
> also disclaim ownership of guest ram pages with this p2m type by
> issuing this new HVMOP with the ioreq server id set to the current
> owner's and the flags parameter set to 0.
>
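To make sure I understand the intended usage: a caller would presumably fill
in the new argument structure along these lines (hypothetical caller-side
sketch, not part of this patch; HVMMEM_ioreq_server is the value introduced
earlier in this series):

    struct xen_hvm_map_mem_type_to_ioreq_server arg = {
        .domid = domid,                          /* target domain */
        .id    = ioservid,                       /* claiming ioreq server */
        .type  = HVMMEM_ioreq_server,
        .flags = HVMOP_IOREQ_MEM_ACCESS_WRITE,   /* forward writes only */
    };
    /* ... issue HVMOP_map_mem_type_to_ioreq_server with &arg ... */

    /* Later, to disclaim: the same call with flags set to 0. */
    arg.flags = 0;
    /* ... issue HVMOP_map_mem_type_to_ioreq_server again ... */
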
> For now, both HVMOP_map_mem_type_to_ioreq_server and p2m_ioreq_server
> are only supported for HVMs with HAP enabled.
>
> Note that the flags parameter (if not 0) of this HVMOP only indicates
> which kinds of memory accesses are to be forwarded to an ioreq server.
> It affects the access rights of guest ram pages, but the two are not
> the same thing. Due to hardware limitations, if only write operations
> are to be forwarded, reads will still be performed at full speed, with
> no hypervisor intervention. But if reads are to be forwarded to an
> ioreq server, writes will inevitably be trapped into the hypervisor as
> well, which means a significant performance impact.
>
> Also note that this HVMOP_map_mem_type_to_ioreq_server will not
> change the p2m type of any guest ram page until HVMOP_set_mem_type
> is triggered. So the normal sequence is for the backend driver to
> first claim ownership of guest ram pages with the p2m_ioreq_server
> type, and only then set the memory type of the specified guest ram
> pages to p2m_ioreq_server.
>
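Just to confirm the ordering described above, the backend flow would then be
something like the following sketch (the first helper is hypothetical; the
second step is the existing HVMOP_set_mem_type path, e.g. via libxc's
xc_hvm_set_mem_type()):

    /* 1. Claim write accesses to p2m_ioreq_server pages (new HVMOP). */
    rc = map_mem_type_to_ioreq_server(xch, domid, ioservid,
                                      HVMMEM_ioreq_server,
                                      HVMOP_IOREQ_MEM_ACCESS_WRITE);

    /* 2. Only afterwards flip the relevant gfns to p2m_ioreq_server;
     *    nothing changes type until HVMOP_set_mem_type is issued. */
    if ( rc == 0 )
        rc = xc_hvm_set_mem_type(xch, domid, HVMMEM_ioreq_server,
                                 first_pfn, nr_pages);
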
> Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
> Signed-off-by: Yu Zhang <yu.c.zhang@xxxxxxxxxxxxxxx>
> Cc: Keir Fraser <keir@xxxxxxx>
> Cc: Jan Beulich <jbeulich@xxxxxxxx>
> Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
> Cc: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
> Cc: Jun Nakajima <jun.nakajima@xxxxxxxxx>
> Cc: Kevin Tian <kevin.tian@xxxxxxxxx>
> Cc: Tim Deegan <tim@xxxxxxx>
> ---
> xen/arch/x86/hvm/emulate.c       | 125 +++++++++++++++++++++++++++++++++++++--
> xen/arch/x86/hvm/hvm.c           |  95 +++++++++++++++++++++++++++--
> xen/arch/x86/mm/hap/nested_hap.c |   2 +-
> xen/arch/x86/mm/p2m-ept.c        |  14 ++++-
> xen/arch/x86/mm/p2m-pt.c         |  25 +++++---
> xen/arch/x86/mm/p2m.c            |  82 +++++++++++++++++++++++++
> xen/arch/x86/mm/shadow/multi.c   |   3 +-
> xen/include/asm-x86/p2m.h        |  36 +++++++++--
> xen/include/public/hvm/hvm_op.h  |  37 ++++++++++++
> 9 files changed, 395 insertions(+), 24 deletions(-)
>
> diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
> index ddc8007..77a4793 100644
> --- a/xen/arch/x86/hvm/emulate.c
> +++ b/xen/arch/x86/hvm/emulate.c
> @@ -94,11 +94,69 @@ static const struct hvm_io_handler null_handler = {
> .ops = &null_ops
> };
>
> +static int mem_read(const struct hvm_io_handler *io_handler,
> +                    uint64_t addr,
> +                    uint32_t size,
> +                    uint64_t *data)
> +{
> +    struct domain *currd = current->domain;
> +    unsigned long gmfn = paddr_to_pfn(addr);
> +    unsigned long offset = addr & ~PAGE_MASK;
> +    struct page_info *page = get_page_from_gfn(currd, gmfn, NULL, P2M_UNSHARE);
> +    uint8_t *p;
> +
> +    if ( !page )
> +        return X86EMUL_UNHANDLEABLE;
> +
> +    p = __map_domain_page(page);
> +    p += offset;
> +    memcpy(data, p, size);
What happens when offset + size crosses the page boundary?
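As written, a straddling access would read past the end of the mapped page.
One option (a sketch only, not necessarily how you want to fix it) would be
to bail out before the memcpy() when the access crosses the boundary:

    /* Refuse accesses that straddle a page boundary, so memcpy()
     * never walks off the end of the single page mapped above.
     * Remember to drop the reference taken by get_page_from_gfn(). */
    if ( offset + size > PAGE_SIZE )
    {
        put_page(page);
        return X86EMUL_UNHANDLEABLE;
    }
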
> diff --git a/xen/include/public/hvm/hvm_op.h b/xen/include/public/hvm/hvm_op.h
> index a1eae52..d46f186 100644
> --- a/xen/include/public/hvm/hvm_op.h
> +++ b/xen/include/public/hvm/hvm_op.h
> @@ -489,6 +489,43 @@ struct xen_hvm_altp2m_op {
> typedef struct xen_hvm_altp2m_op xen_hvm_altp2m_op_t;
> DEFINE_XEN_GUEST_HANDLE(xen_hvm_altp2m_op_t);
>
> +#if defined(__XEN__) || defined(__XEN_TOOLS__)
> +
> +/*
> + * HVMOP_map_mem_type_to_ioreq_server : map or unmap the IOREQ Server <id>
> + *                                      to specific memory type <type>
> + *                                      for specific accesses <flags>
> + *
> + * Note that if only write operations are to be forwarded to an ioreq server,
> + * read operations will be performed with no hypervisor intervention. But if
> + * flags indicates that read operations are to be forwarded to an ioreq
> + * server, write operations will inevitably be trapped into the hypervisor;
> + * whether they are emulated by the hypervisor or forwarded to the ioreq
> + * server depends on the flags setting. This means a significant performance
> + * impact.
> + */
> +#define HVMOP_map_mem_type_to_ioreq_server 26
> +struct xen_hvm_map_mem_type_to_ioreq_server {
> +    domid_t domid;      /* IN - domain to be serviced */
> +    ioservid_t id;      /* IN - ioreq server id */
> +    hvmmem_type_t type; /* IN - memory type */
hvmmem_type_t is an enum and doesn't have a fixed width. It can't be
used in the public API.
You also have some implicit padding holes as a result of the layout.
~Andrew
> +    uint32_t flags;     /* IN - types of accesses to be forwarded to the
> +                           ioreq server. flags with 0 means to unmap the
> +                           ioreq server */
> +#define _HVMOP_IOREQ_MEM_ACCESS_READ 0
> +#define HVMOP_IOREQ_MEM_ACCESS_READ \
> +    (1u << _HVMOP_IOREQ_MEM_ACCESS_READ)
> +
> +#define _HVMOP_IOREQ_MEM_ACCESS_WRITE 1
> +#define HVMOP_IOREQ_MEM_ACCESS_WRITE \
> +    (1u << _HVMOP_IOREQ_MEM_ACCESS_WRITE)
> +};
> +typedef struct xen_hvm_map_mem_type_to_ioreq_server
> +    xen_hvm_map_mem_type_to_ioreq_server_t;
> +DEFINE_XEN_GUEST_HANDLE(xen_hvm_map_mem_type_to_ioreq_server_t);
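Regarding the enum-width and padding points above, something along these
lines might work (purely illustrative; the exact widths and ordering are your
call), using fixed-width types and explicit padding:

    struct xen_hvm_map_mem_type_to_ioreq_server {
        domid_t domid;      /* IN - domain to be serviced */
        ioservid_t id;      /* IN - ioreq server id */
        uint16_t type;      /* IN - memory type (an HVMMEM_* value) */
        uint16_t pad;       /* IN - must be zero */
        uint32_t flags;     /* IN - accesses to forward; 0 unmaps */
    };

As domid_t and ioservid_t are both uint16_t, this packs to 12 bytes with no
implicit holes.
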
> +
> +#endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
> +
> +
> #endif /* __XEN_PUBLIC_HVM_HVM_OP_H__ */
>
> /*
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel