
Re: [Xen-devel] [v2 06/11] vmx: add help functions to support PML





On 04/17/2015 06:57 AM, Tian, Kevin wrote:
From: Kai Huang [mailto:kai.huang@xxxxxxxxxxxxxxx]
Sent: Wednesday, April 15, 2015 3:04 PM

This patch adds helper functions to enable/disable PML and to flush the PML
buffer, both for a single vcpu and for a particular domain, for later use.

Signed-off-by: Kai Huang <kai.huang@xxxxxxxxxxxxxxx>
---
  xen/arch/x86/hvm/vmx/vmcs.c        | 178 +++++++++++++++++++++++++++++++++++++
  xen/include/asm-x86/hvm/vmx/vmcs.h |   9 ++
  2 files changed, 187 insertions(+)

diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
index d120370..d3cb50f 100644
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -1328,6 +1328,184 @@ void vmx_clear_eoi_exit_bitmap(struct vcpu *v, u8 vector)
                  &v->arch.hvm_vmx.eoi_exitmap_changed);
  }

+bool_t vmx_vcpu_pml_enabled(const struct vcpu *v)
+{
+    return !!(v->arch.hvm_vmx.secondary_exec_control &
+              SECONDARY_EXEC_ENABLE_PML);
+}
+
+int vmx_vcpu_enable_pml(struct vcpu *v)
+{
+    struct domain *d = v->domain;
+
+    if ( vmx_vcpu_pml_enabled(v) )
+        return 0;
+
+    v->arch.hvm_vmx.pml_pg = d->arch.paging.alloc_page(d);
+    if ( !v->arch.hvm_vmx.pml_pg )
+        return -ENOMEM;
+
+    vmx_vmcs_enter(v);
+
+    __vmwrite(PML_ADDRESS, page_to_mfn(v->arch.hvm_vmx.pml_pg) << PAGE_SHIFT);
+    __vmwrite(GUEST_PML_INDEX, NR_PML_ENTRIES - 1);
+
+    v->arch.hvm_vmx.secondary_exec_control |= SECONDARY_EXEC_ENABLE_PML;
+
+    __vmwrite(SECONDARY_VM_EXEC_CONTROL,
+              v->arch.hvm_vmx.secondary_exec_control);
+
+    vmx_vmcs_exit(v);
+
+    return 0;
+}
+
+void vmx_vcpu_disable_pml(struct vcpu *v)
+{
+    if ( !vmx_vcpu_pml_enabled(v) )
+        return;
+
+    /* Make sure we don't lose any logged GPAs */
+    vmx_vcpu_flush_pml_buffer(v);
+
+    vmx_vmcs_enter(v);
+
+    v->arch.hvm_vmx.secondary_exec_control &= ~SECONDARY_EXEC_ENABLE_PML;
+    __vmwrite(SECONDARY_VM_EXEC_CONTROL,
+              v->arch.hvm_vmx.secondary_exec_control);
+
+    vmx_vmcs_exit(v);
+
+    v->domain->arch.paging.free_page(v->domain, v->arch.hvm_vmx.pml_pg);
+    v->arch.hvm_vmx.pml_pg = NULL;
+}
+
+void vmx_vcpu_flush_pml_buffer(struct vcpu *v)
+{
+    uint64_t *pml_buf;
+    unsigned long pml_idx;
+
+    ASSERT(vmx_vcpu_pml_enabled(v));
+
+    vmx_vmcs_enter(v);
+
+    __vmread(GUEST_PML_INDEX, &pml_idx);
+
+    /* Do nothing if PML buffer is empty */
+    if ( pml_idx == (NR_PML_ENTRIES - 1) )
+        goto out;
+
+    pml_buf = __map_domain_page(v->arch.hvm_vmx.pml_pg);
+
+    /*
+     * PML index can be either 2^16-1 (buffer is full), or 0~511 (buffer is not
0~NR_PML_ENTRIES-1
Will do.


+     * full), and in the latter case the PML index always points to the next
+     * available entry.
+     */
+    if ( pml_idx >= NR_PML_ENTRIES )
+        pml_idx = 0;
+    else
+        pml_idx++;
+
+    for ( ; pml_idx < NR_PML_ENTRIES; pml_idx++ )
+    {
+        unsigned long gfn = pml_buf[pml_idx] >> PAGE_SHIFT;
+        /*
+         * Need to change type from log-dirty to normal memory for logged GFN.
+         * hap_track_dirty_vram depends on it to work. And we really only need
+         * to mark GFNs which have been successfully changed from log-dirty to
+         * normal memory to be dirty.
+         */
+        if ( !p2m_change_type_one(v->domain, gfn, p2m_ram_logdirty,
+                    p2m_ram_rw) )
+            paging_mark_gfn_dirty(v->domain, gfn);
Should we handle errors from p2m_change_type_one and consequently make
this flush function non-void?
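
E.g. the loop body could become something like the below (just a sketch to
illustrate, not tested; 'err' would be declared at the top, returned at the
end, and the function changed from void to int; we would also need to
distinguish a GFN that is already normal memory from a real p2m failure):

    rc = p2m_change_type_one(v->domain, gfn, p2m_ram_logdirty,
                             p2m_ram_rw);
    if ( rc == 0 )
        paging_mark_gfn_dirty(v->domain, gfn);
    else if ( !err )
        err = rc;   /* remember the first failure, but keep flushing */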

+    }
+
+    unmap_domain_page(pml_buf);
+
+    /* Reset PML index */
+    __vmwrite(GUEST_PML_INDEX, NR_PML_ENTRIES - 1);
As Jan pointed out, an assertion on the vcpu's state is required here;
otherwise blindly resetting the PML index may race with new entries being
logged by a running vcpu.
Yeah will do.
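
Something along these lines, perhaps (sketch only; the exact condition to
assert still needs confirming):

    /*
     * No new entries can be logged concurrently: the vcpu is either
     * current, or it is neither runnable nor running on another pcpu.
     */
    ASSERT((v == current) || (!vcpu_runnable(v) && !v->is_running));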

Thanks,
-Kai

+
+out:
+    vmx_vmcs_exit(v);
+}
+
+bool_t vmx_domain_pml_enabled(const struct domain *d)
+{
+    return !!(d->arch.hvm_domain.vmx.status & VMX_DOMAIN_PML_ENABLED);
+}
+
+/*
+ * This function enables PML for a particular domain. It should be called
+ * when the domain is paused.
+ *
+ * PML needs to be enabled globally for all vcpus of the domain, as the PML
+ * buffer and PML index are per-vcpu, but the EPT table is shared by vcpus,
+ * therefore enabling PML on only some of the vcpus won't work.
+ */
+int vmx_domain_enable_pml(struct domain *d)
+{
+    struct vcpu *v;
+    int rc;
+
+    ASSERT(atomic_read(&d->pause_count));
+
+    if ( vmx_domain_pml_enabled(d) )
+        return 0;
+
+    for_each_vcpu( d, v )
+        if ( (rc = vmx_vcpu_enable_pml(v)) != 0 )
+            goto error;
+
+    d->arch.hvm_domain.vmx.status |= VMX_DOMAIN_PML_ENABLED;
I don't see how this domain-wide flag is useful. Or, if we really want to
go this way, you also need to clear the flag when enabling PML for a vcpu
fails during vcpu hotplug, since the flag means PML is enabled on all
vcpus of the domain, and we must preserve that invariant everywhere.
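
E.g. something along these lines in the vcpu-hotplug path (purely
illustrative, not the actual code; whether to fail vcpu creation or to tear
PML down for the whole domain instead is a separate question):

    if ( vmx_domain_pml_enabled(v->domain) &&
         (rc = vmx_vcpu_enable_pml(v)) != 0 )
    {
        /* Failing here keeps the invariant: all existing vcpus have PML. */
        return rc;
    }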

+
+    return 0;
+
+error:
+    for_each_vcpu( d, v )
+        if ( vmx_vcpu_pml_enabled(v) )
+            vmx_vcpu_disable_pml(v);
+    return rc;
+}
+
+/*
+ * Disable PML for a particular domain. Called when the domain is paused.
+ *
+ * As with enabling PML for a domain, disabling PML should be done for all
+ * vcpus at once.
+ */
+void vmx_domain_disable_pml(struct domain *d)
+{
+    struct vcpu *v;
+
+    ASSERT(atomic_read(&d->pause_count));
+
+    if ( !vmx_domain_pml_enabled(d) )
+        return;
+
+    for_each_vcpu( d, v )
+        vmx_vcpu_disable_pml(v);
+
+    d->arch.hvm_domain.vmx.status &= ~VMX_DOMAIN_PML_ENABLED;
+}
+
+/*
+ * Flush the PML buffers of all vcpus, and update the logged dirty pages in
+ * the log-dirty radix tree. Called when the domain is paused.
+ */
+void vmx_domain_flush_pml_buffers(struct domain *d)
+{
+    struct vcpu *v;
+
+    ASSERT(atomic_read(&d->pause_count));
+
+    if ( !vmx_domain_pml_enabled(d) )
+        return;
+
+    for_each_vcpu( d, v )
+        vmx_vcpu_flush_pml_buffer(v);
+}
+
  int vmx_create_vmcs(struct vcpu *v)
  {
      struct arch_vmx_struct *arch_vmx = &v->arch.hvm_vmx;
diff --git a/xen/include/asm-x86/hvm/vmx/vmcs.h b/xen/include/asm-x86/hvm/vmx/vmcs.h
index 2c679ac..ceb09bf 100644
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h
@@ -498,6 +498,15 @@ static inline int vmx_add_host_load_msr(u32 msr)

  DECLARE_PER_CPU(bool_t, vmxon);

+bool_t vmx_vcpu_pml_enabled(const struct vcpu *v);
+int vmx_vcpu_enable_pml(struct vcpu *v);
+void vmx_vcpu_disable_pml(struct vcpu *v);
+void vmx_vcpu_flush_pml_buffer(struct vcpu *v);
+bool_t vmx_domain_pml_enabled(const struct domain *d);
+int vmx_domain_enable_pml(struct domain *d);
+void vmx_domain_disable_pml(struct domain *d);
+void vmx_domain_flush_pml_buffers(struct domain *d);
+
  #endif /* ASM_X86_HVM_VMX_VMCS_H__ */

  /*
--
2.1.0

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel

