[Xen-changelog] [xen-unstable] x86/mm: add HVMOP_get_mem_type hvmop
# HG changeset patch
# User Olaf Hering <olaf@xxxxxxxxx>
# Date 1304512678 -3600
# Node ID 26413986e6e06cedc2eb96c4b3c4b37f2df12125
# Parent  10f27b8b3d63959c7a8e15299a7a398b7ff7f230
x86/mm: add HVMOP_get_mem_type hvmop

The balloon driver in the guest frees guest pages and marks them as
mmio. When the kernel crashes and the crash kernel attempts to read the
oldmem via /proc/vmcore, a read from ballooned pages will generate 100%
load in dom0 because Xen asks qemu-dm for the page content. Since the
reads come in as 8-byte requests, each ballooned page is tried 512 times.

Add a new hvmop HVMOP_get_mem_type to return the hvmmem_type_t for the
given pfn. Pages which are neither ram nor mmio will be HVMMEM_mmio_dm.
This interface enables the crash kernel to skip ballooned pages.

Signed-off-by: Olaf Hering <olaf@xxxxxxxxx>
Acked-by: Tim Deegan <Tim.Deegan@xxxxxxxxxx>
Committed-by: Tim Deegan <Tim.Deegan@xxxxxxxxxx>
---

diff -r 10f27b8b3d63 -r 26413986e6e0 xen/arch/ia64/vmx/vmx_hypercall.c
--- a/xen/arch/ia64/vmx/vmx_hypercall.c Mon May 02 12:00:40 2011 +0100
+++ b/xen/arch/ia64/vmx/vmx_hypercall.c Wed May 04 13:37:58 2011 +0100
@@ -217,6 +217,7 @@
         break;
     }
 
+    case HVMOP_get_mem_type:
     case HVMOP_set_mem_type:
     case HVMOP_set_mem_access:
     case HVMOP_get_mem_access:
diff -r 10f27b8b3d63 -r 26413986e6e0 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c    Mon May 02 12:00:40 2011 +0100
+++ b/xen/arch/x86/hvm/hvm.c    Wed May 04 13:37:58 2011 +0100
@@ -3676,6 +3676,37 @@
         break;
     }
 
+    case HVMOP_get_mem_type:
+    {
+        struct xen_hvm_get_mem_type a;
+        struct domain *d;
+        p2m_type_t t;
+
+        if ( copy_from_guest(&a, arg, 1) )
+            return -EFAULT;
+
+        rc = rcu_lock_target_domain_by_id(a.domid, &d);
+        if ( rc != 0 )
+            return rc;
+
+        rc = -EINVAL;
+        if ( is_hvm_domain(d) )
+        {
+            gfn_to_mfn_unshare(p2m_get_hostp2m(d), a.pfn, &t, 0);
+            if ( p2m_is_mmio(t) )
+                a.mem_type = HVMMEM_mmio_dm;
+            else if ( p2m_is_readonly(t) )
+                a.mem_type = HVMMEM_ram_ro;
+            else if ( p2m_is_ram(t) )
+                a.mem_type = HVMMEM_ram_rw;
+            else
+                a.mem_type = HVMMEM_mmio_dm;
+            rc = copy_to_guest(arg, &a, 1) ? -EFAULT : 0;
+        }
+        rcu_unlock_domain(d);
+        break;
+    }
+
     case HVMOP_set_mem_type:
     {
         struct xen_hvm_set_mem_type a;
diff -r 10f27b8b3d63 -r 26413986e6e0 xen/include/public/hvm/hvm_op.h
--- a/xen/include/public/hvm/hvm_op.h   Mon May 02 12:00:40 2011 +0100
+++ b/xen/include/public/hvm/hvm_op.h   Wed May 04 13:37:58 2011 +0100
@@ -76,6 +76,12 @@
 /* Flushes all VCPU TLBs: @arg must be NULL. */
 #define HVMOP_flush_tlbs          5
 
+typedef enum {
+    HVMMEM_ram_rw,             /* Normal read/write guest RAM */
+    HVMMEM_ram_ro,             /* Read-only; writes are discarded */
+    HVMMEM_mmio_dm,            /* Reads and write go to the device model */
+} hvmmem_type_t;
+
 /* Following tools-only interfaces may change in future. */
 #if defined(__XEN__) || defined(__XEN_TOOLS__)
 
@@ -109,11 +115,6 @@
 DEFINE_XEN_GUEST_HANDLE(xen_hvm_modified_memory_t);
 
 #define HVMOP_set_mem_type    8
-typedef enum {
-    HVMMEM_ram_rw,             /* Normal read/write guest RAM */
-    HVMMEM_ram_ro,             /* Read-only; writes are discarded */
-    HVMMEM_mmio_dm,            /* Reads and write go to the device model */
-} hvmmem_type_t;
 /* Notify that a region of memory is to be treated in a specific way. */
 struct xen_hvm_set_mem_type {
     /* Domain to be updated. */
@@ -225,4 +226,18 @@
 
 #endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
 
+#define HVMOP_get_mem_type    15
+/* Return hvmmem_type_t for the specified pfn. */
+struct xen_hvm_get_mem_type {
+    /* Domain to be queried. */
+    domid_t domid;
+    /* OUT variable. */
+    uint16_t mem_type;
+    uint16_t pad[2]; /* align next field on 8-byte boundary */
+    /* IN variable. */
+    uint64_t pfn;
+};
+typedef struct xen_hvm_get_mem_type xen_hvm_get_mem_type_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_get_mem_type_t);
+
 #endif /* __XEN_PUBLIC_HVM_HVM_OP_H__ */
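
As a usage illustration (not part of the changeset above), the sketch below shows roughly how a crash/kexec kernel could consult HVMOP_get_mem_type before reading a page from the old memory image, so that ballooned pages (reported as HVMMEM_mmio_dm) are skipped instead of being bounced to qemu-dm. The helper names HYPERVISOR_hvm_op and DOMID_SELF, and the Linux-style include paths, are assumptions about the guest-side environment, not something introduced by this patch.

/* Illustrative sketch only -- not from the changeset above.
 * Assumes a Linux HVM guest with the usual Xen hypercall helpers
 * (HYPERVISOR_hvm_op, DOMID_SELF) and a copy of the public header
 * extended by this patch. */
#include <linux/errno.h>
#include <xen/interface/hvm/hvm_op.h>
#include <asm/xen/hypercall.h>

/* Return 1 if the pfn is backed by readable RAM, 0 if it should be
 * skipped (ballooned pages come back as HVMMEM_mmio_dm), or a
 * negative errno if the hypercall itself failed. */
static int pfn_is_ram(unsigned long pfn)
{
    struct xen_hvm_get_mem_type a = {
        .domid = DOMID_SELF,
        .pfn   = pfn,
    };

    if (HYPERVISOR_hvm_op(HVMOP_get_mem_type, &a))
        return -ENXIO;

    return a.mem_type != HVMMEM_mmio_dm;
}

A /proc/vmcore read path could call such a helper once per page and substitute zeroes for the pages it skips, avoiding the 512 qemu-dm round trips per ballooned page described in the commit message.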