[xen stable-4.19] x86/ept: move vmx_domain_flush_pml_buffers() to p2m-ept.c
commit fee18c96f917e26bfbcec523fd368669ade931cc
Author: Roger Pau Monné <roger.pau@xxxxxxxxxx>
AuthorDate: Tue Jul 22 10:00:16 2025 +0200
Commit: Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Tue Jul 22 10:00:16 2025 +0200
x86/ept: move vmx_domain_flush_pml_buffers() to p2m-ept.c
No functional change intended.
Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
Acked-by: Jan Beulich <jbeulich@xxxxxxxx>
master commit: cca662dea76059d63ca93d32e2362c31579266ab
master date: 2025-07-18 09:17:25 +0200
---
xen/arch/x86/hvm/vmx/vmcs.c | 59 ++-------------------------------
xen/arch/x86/hvm/vmx/vmx.c | 2 +-
xen/arch/x86/include/asm/hvm/vmx/vmcs.h | 1 -
xen/arch/x86/include/asm/hvm/vmx/vmx.h | 1 +
xen/arch/x86/mm/p2m-ept.c | 56 +++++++++++++++++++++++++++++++
5 files changed, 60 insertions(+), 59 deletions(-)
diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
index aa7911089a..49344bcbc6 100644
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -1684,7 +1684,7 @@ void vmx_vcpu_disable_pml(struct vcpu *v)
return;
/* Make sure we don't lose any logged GPAs. */
- vmx_vcpu_flush_pml_buffer(v);
+ ept_vcpu_flush_pml_buffer(v);
vmx_vmcs_enter(v);
@@ -1698,61 +1698,6 @@ void vmx_vcpu_disable_pml(struct vcpu *v)
v->arch.hvm.vmx.pml_pg = NULL;
}
-void vmx_vcpu_flush_pml_buffer(struct vcpu *v)
-{
- uint64_t *pml_buf;
- unsigned long pml_idx;
-
- ASSERT((v == current) || (!vcpu_runnable(v) && !v->is_running));
- ASSERT(vmx_vcpu_pml_enabled(v));
-
- vmx_vmcs_enter(v);
-
- __vmread(GUEST_PML_INDEX, &pml_idx);
-
- /* Do nothing if PML buffer is empty. */
- if ( pml_idx == (NR_PML_ENTRIES - 1) )
- goto out;
-
- pml_buf = __map_domain_page(v->arch.hvm.vmx.pml_pg);
-
- /*
- * PML index can be either 2^16-1 (buffer is full), or 0 ~ NR_PML_ENTRIES-1
- * (buffer is not full), and in latter case PML index always points to next
- * available entity.
- */
- if ( pml_idx >= NR_PML_ENTRIES )
- pml_idx = 0;
- else
- pml_idx++;
-
- for ( ; pml_idx < NR_PML_ENTRIES; pml_idx++ )
- {
- unsigned long gfn = pml_buf[pml_idx] >> PAGE_SHIFT;
-
- /*
- * Need to change type from log-dirty to normal memory for logged GFN.
- * hap_track_dirty_vram depends on it to work. And we mark all logged
- * GFNs to be dirty, as we cannot be sure whether it's safe to ignore
- * GFNs on which p2m_change_type_one returns failure. The failure cases
- * are very rare, and additional cost is negligible, but a missing mark
- * is extremely difficult to debug.
- */
- p2m_change_type_one(v->domain, gfn, p2m_ram_logdirty, p2m_ram_rw);
-
- /* HVM guest: pfn == gfn */
- paging_mark_pfn_dirty(v->domain, _pfn(gfn));
- }
-
- unmap_domain_page(pml_buf);
-
- /* Reset PML index */
- __vmwrite(GUEST_PML_INDEX, NR_PML_ENTRIES - 1);
-
- out:
- vmx_vmcs_exit(v);
-}
-
bool vmx_domain_pml_enabled(const struct domain *d)
{
return d->arch.hvm.vmx.status & VMX_DOMAIN_PML_ENABLED;
@@ -1826,7 +1771,7 @@ void vmx_domain_flush_pml_buffers(struct domain *d)
return;
for_each_vcpu ( d, v )
- vmx_vcpu_flush_pml_buffer(v);
+ ept_vcpu_flush_pml_buffer(v);
}
static void vmx_vcpu_update_eptp(struct vcpu *v, u64 eptp)
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index cb2cc8aa28..b4e61e0258 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -4707,7 +4707,7 @@ void asmlinkage vmx_vmexit_handler(struct cpu_user_regs *regs)
!(idtv_info & INTR_INFO_VALID_MASK) )
undo_nmis_unblocked_by_iret();
- vmx_vcpu_flush_pml_buffer(v);
+ ept_vcpu_flush_pml_buffer(v);
break;
case EXIT_REASON_XSAVES:
diff --git a/xen/arch/x86/include/asm/hvm/vmx/vmcs.h b/xen/arch/x86/include/asm/hvm/vmx/vmcs.h
index 58140af691..feeca40f31 100644
--- a/xen/arch/x86/include/asm/hvm/vmx/vmcs.h
+++ b/xen/arch/x86/include/asm/hvm/vmx/vmcs.h
@@ -687,7 +687,6 @@ DECLARE_PER_CPU(bool, vmxon);
bool vmx_vcpu_pml_enabled(const struct vcpu *v);
int vmx_vcpu_enable_pml(struct vcpu *v);
void vmx_vcpu_disable_pml(struct vcpu *v);
-void vmx_vcpu_flush_pml_buffer(struct vcpu *v);
bool vmx_domain_pml_enabled(const struct domain *d);
int vmx_domain_enable_pml(struct domain *d);
void vmx_domain_disable_pml(struct domain *d);
diff --git a/xen/arch/x86/include/asm/hvm/vmx/vmx.h b/xen/arch/x86/include/asm/hvm/vmx/vmx.h
index 1489dd05c2..3b01edf5e7 100644
--- a/xen/arch/x86/include/asm/hvm/vmx/vmx.h
+++ b/xen/arch/x86/include/asm/hvm/vmx/vmx.h
@@ -588,6 +588,7 @@ void ept_walk_table(struct domain *d, unsigned long gfn);
bool ept_handle_misconfig(uint64_t gpa);
int epte_get_entry_emt(struct domain *d, gfn_t gfn, mfn_t mfn,
unsigned int order, bool *ipat, p2m_type_t type);
+void ept_vcpu_flush_pml_buffer(struct vcpu *v);
void setup_ept_dump(void);
/* Locate an alternate p2m by its EPTP */
unsigned int p2m_find_altp2m_by_eptp(struct domain *d, uint64_t eptp);
diff --git a/xen/arch/x86/mm/p2m-ept.c b/xen/arch/x86/mm/p2m-ept.c
index 469e27ee93..952fe3ccb1 100644
--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -1370,6 +1370,62 @@ static void cf_check ept_flush_pml_buffers(struct p2m_domain *p2m)
vmx_domain_flush_pml_buffers(p2m->domain);
}
+void ept_vcpu_flush_pml_buffer(struct vcpu *v)
+{
+ uint64_t *pml_buf;
+ unsigned long pml_idx;
+
+ ASSERT((v == current) || (!vcpu_runnable(v) && !v->is_running));
+ ASSERT(vmx_vcpu_pml_enabled(v));
+
+ vmx_vmcs_enter(v);
+
+ __vmread(GUEST_PML_INDEX, &pml_idx);
+
+ /* Do nothing if PML buffer is empty. */
+ if ( pml_idx == (NR_PML_ENTRIES - 1) )
+ goto out;
+
+ pml_buf = __map_domain_page(v->arch.hvm.vmx.pml_pg);
+
+ /*
+ * PML index can be either 2^16-1 (buffer is full), or 0 ~ NR_PML_ENTRIES-1
+ * (buffer is not full), and in latter case PML index always points to next
+ * available entity.
+ */
+ if ( pml_idx >= NR_PML_ENTRIES )
+ pml_idx = 0;
+ else
+ pml_idx++;
+
+ for ( ; pml_idx < NR_PML_ENTRIES; pml_idx++ )
+ {
+ unsigned long gfn = pml_buf[pml_idx] >> PAGE_SHIFT;
+
+ /*
+ * Need to change type from log-dirty to normal memory for logged GFN.
- * hap_track_dirty_vram depends on it to work. And we mark all logged
+ * GFNs to be dirty, as we cannot be sure whether it's safe to ignore
+ * GFNs on which p2m_change_type_one returns failure. The failure cases
+ * are very rare, and additional cost is negligible, but a missing mark
+ * is extremely difficult to debug.
+ */
+ p2m_change_type_one(v->domain, gfn, p2m_ram_logdirty, p2m_ram_rw);
+
+ /* HVM guest: pfn == gfn */
+ paging_mark_pfn_dirty(v->domain, _pfn(gfn));
+ }
+
+ unmap_domain_page(pml_buf);
+
+ /* Reset PML index */
+ __vmwrite(GUEST_PML_INDEX, NR_PML_ENTRIES - 1);
+
+ out:
+ vmx_vmcs_exit(v);
+
+}
+
int ept_p2m_init(struct p2m_domain *p2m)
{
struct ept_data *ept = &p2m->ept;
--
generated by git-patchbot for /home/xen/git/xen.git#stable-4.19
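For reference, the PML index handling that the moved function relies on can be reproduced outside the hypervisor. The sketch below is not part of the patch: NR_PML_ENTRIES (512, i.e. a 4 KiB buffer of 8-byte guest-physical addresses) and the "index of 2^16-1 means the buffer is full" convention are taken from the comments in the diff, while walk_pml_buffer(), main() and the sample buffer contents are invented purely for illustration.

/*
 * Standalone sketch, not part of the patch above: it reproduces the index
 * handling that ept_vcpu_flush_pml_buffer() applies to the PML buffer.
 * NR_PML_ENTRIES and the "index >= NR_PML_ENTRIES means the buffer is
 * full" convention follow the comments in the diff; main() and the sample
 * buffer contents are invented for illustration.
 */
#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

#define NR_PML_ENTRIES 512   /* 4 KiB buffer of 8-byte guest-physical addresses */
#define PAGE_SHIFT     12

static void walk_pml_buffer(const uint64_t *pml_buf, unsigned long pml_idx)
{
    /* An index of NR_PML_ENTRIES - 1 means nothing has been logged yet. */
    if ( pml_idx == (NR_PML_ENTRIES - 1) )
        return;

    /*
     * The index is 2^16-1 when the buffer is full; otherwise it points at
     * the next free slot, so the first valid entry is the one after it
     * (hardware fills the buffer downwards from the highest slot).
     */
    if ( pml_idx >= NR_PML_ENTRIES )
        pml_idx = 0;
    else
        pml_idx++;

    for ( ; pml_idx < NR_PML_ENTRIES; pml_idx++ )
        printf("dirty gfn %#" PRIx64 "\n", pml_buf[pml_idx] >> PAGE_SHIFT);
}

int main(void)
{
    uint64_t buf[NR_PML_ENTRIES] = { 0 };

    /* Pretend the CPU logged two writes, filling slots 511 and 510. */
    buf[511] = UINT64_C(0x1234) << PAGE_SHIFT;
    buf[510] = UINT64_C(0x5678) << PAGE_SHIFT;

    walk_pml_buffer(buf, 509);   /* a real caller would __vmread(GUEST_PML_INDEX, ...) */
    return 0;
}

Running it prints the two GFNs (0x5678 and 0x1234) that the simulated hardware logged, mirroring the range the hypervisor would mark dirty.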