
[Xen-devel] [PATCH v3] x86/hvm/viridian: zero and check vcpu context __pad field



Commit 57844631 "save APIC assist vector" added an extra field to the
viridian vcpu context save record. This field is only a uint8_t, so an
extra __pad field was also added to pad the record up to the next
64-bit boundary.
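
For reference, the record layout implied by the fields used in the diff
below is presumably along these lines (a sketch only; the field order and
the 7-byte pad size are inferred from the uint64_t/uint8_t layout rather
than copied from the public header):

    #include <stdint.h>

    struct hvm_viridian_vcpu_context {
        uint64_t apic_assist_msr;    /* MSR value saved/restored below */
        uint8_t  apic_assist_vector; /* added by commit 57844631 */
        uint8_t  _pad[7];            /* pads the record to a 64-bit boundary */
    };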

This patch makes sure that the __pad field is zeroed on save and
checked for zero on restore. This prevents a potential leak of
information from the stack and provides a compatibility check against
future use of the space occupied by the __pad field.
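
As a minimal standalone illustration of the restore-side check (not the
patch's code; the hunk below compares against the hypervisor's page-sized
zero_page instead of a local buffer, and the helper name here is made up):

    #include <errno.h>   /* EINVAL */
    #include <stdint.h>
    #include <string.h>  /* memcmp */

    /* Return -EINVAL if any of the len padding bytes is non-zero. */
    static int check_pad_clear(const uint8_t *pad, size_t len)
    {
        static const uint8_t zeroes[8]; /* zero-initialised; assumes len <= 8 */

        return memcmp(pad, zeroes, len) ? -EINVAL : 0;
    }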

This patch also adds a memset to make sure that the viridian domain
context is fully zeroed. This is not strictly necessary but helps
make the code more robust if fields are added to that struct in
future.
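
Zeroing the whole structure is worthwhile, even though every named field
is assigned immediately afterwards, because field assignments leave any
padding bytes of the on-stack variable indeterminate. A minimal
illustration with a made-up struct (not the actual domain context):

    #include <stdint.h>
    #include <string.h>

    struct example_ctxt {
        uint8_t  flag;   /* followed by 7 bytes of implicit padding */
        uint64_t value;
    };

    static void save_example(struct example_ctxt *out)
    {
        struct example_ctxt ctxt;

        memset(&ctxt, 0, sizeof(ctxt)); /* zeroes the padding bytes too */
        ctxt.flag  = 1;
        ctxt.value = 42;

        memcpy(out, &ctxt, sizeof(ctxt)); /* copy, padding included */
    }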

Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
Cc: Keir Fraser <keir@xxxxxxx>
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---

v3:
 - make zero_page accessible outside mm.c

v2:
 - drop the is_zero() helper and use memcmp against zero_page instead.
 - add memset to viridian_save_domain_ctxt() to reduce potential
   for information leakage in future.
---
 xen/arch/x86/hvm/viridian.c | 7 +++++++
 xen/arch/x86/mm.c           | 5 +++--
 xen/include/asm-x86/mm.h    | 2 ++
 3 files changed, 12 insertions(+), 2 deletions(-)

diff --git a/xen/arch/x86/hvm/viridian.c b/xen/arch/x86/hvm/viridian.c
index 5c76c1a..165f58e 100644
--- a/xen/arch/x86/hvm/viridian.c
+++ b/xen/arch/x86/hvm/viridian.c
@@ -785,6 +785,8 @@ static int viridian_save_domain_ctxt(struct domain *d, hvm_domain_context_t *h)
     if ( !is_viridian_domain(d) )
         return 0;
 
+    memset(&ctxt, 0, sizeof(ctxt));
+
     ctxt.time_ref_count = d->arch.hvm_domain.viridian.time_ref_count.val;
     ctxt.hypercall_gpa  = d->arch.hvm_domain.viridian.hypercall_gpa.raw;
     ctxt.guest_os_id    = d->arch.hvm_domain.viridian.guest_os_id.raw;
@@ -824,6 +826,8 @@ static int viridian_save_vcpu_ctxt(struct domain *d, hvm_domain_context_t *h)
     for_each_vcpu( d, v ) {
         struct hvm_viridian_vcpu_context ctxt;
 
+        memset(&ctxt, 0, sizeof(ctxt));
+
         ctxt.apic_assist_msr = v->arch.hvm_vcpu.viridian.apic_assist.msr.raw;
         ctxt.apic_assist_vector = v->arch.hvm_vcpu.viridian.apic_assist.vector;
 
@@ -851,6 +855,9 @@ static int viridian_load_vcpu_ctxt(struct domain *d, hvm_domain_context_t *h)
     if ( hvm_load_entry_zeroextend(VIRIDIAN_VCPU, h, &ctxt) != 0 )
         return -EINVAL;
 
+    if ( memcmp(&ctxt._pad, zero_page, sizeof(ctxt._pad)) )
+        return -EINVAL;
+
     v->arch.hvm_vcpu.viridian.apic_assist.msr.raw = ctxt.apic_assist_msr;
     if ( v->arch.hvm_vcpu.viridian.apic_assist.msr.fields.enabled )
         initialize_apic_assist(v);
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index c997b53..b8b41fa 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -589,7 +589,8 @@ static inline void guest_get_eff_kern_l1e(struct vcpu *v, unsigned long addr,
     TOGGLE_MODE();
 }
 
-static const char __section(".bss.page_aligned.const") zero_page[PAGE_SIZE];
+static const char __section(".bss.page_aligned.const") __zero_page[PAGE_SIZE];
+const char *zero_page = __zero_page;
 
 static void invalidate_shadow_ldt(struct vcpu *v, int flush)
 {
@@ -4562,7 +4563,7 @@ void destroy_gdt(struct vcpu *v)
 {
     l1_pgentry_t *pl1e;
     unsigned int i;
-    unsigned long pfn, zero_pfn = PFN_DOWN(__pa(zero_page));
+    unsigned long pfn, zero_pfn = PFN_DOWN(__pa(__zero_page));
 
     v->arch.pv_vcpu.gdt_ents = 0;
     pl1e = gdt_ldt_ptes(v->domain, v);
diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
index b25942b..01553ab 100644
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -595,4 +595,6 @@ typedef struct mm_rwlock {
                        &(d)->xenpage_list : &(d)->page_list,            \
                    &(d)->arch.relmem_list)
 
+extern const char *zero_page;
+
 #endif /* __ASM_X86_MM_H__ */
-- 
2.1.4

