
[Xen-changelog] [xen stable-4.9] x86/shadow: Infrastructure to force a PV guest into shadow mode



commit 4704d590fa2ec7307d18e41f3e53ac4ae4a834a7
Author:     Juergen Gross <jgross@xxxxxxxx>
AuthorDate: Mon Jul 23 07:11:40 2018 +0100
Commit:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CommitDate: Tue Aug 14 17:20:00 2018 +0100

    x86/shadow: Infrastructure to force a PV guest into shadow mode
    
    To mitigate L1TF, we cannot alter an architecturally-legitimate PTE a PV guest
    chooses to write, but we can force the PV domain into shadow mode so Xen
    controls the PTEs which are reachable by the CPU pagewalk.
    
    Introduce a new shadow mode, PG_SH_forced, and a tasklet to perform the
    transition.  Later patches will introduce the logic to enable this mode at the
    appropriate time.
    
    To simplify vcpu cleanup, make tasklet_kill() idempotent with respect to
    tasklet_init(), which involves adding a helper to check for an uninitialised
    list head.
    
    This is part of XSA-273 / CVE-2018-3620.
    
    Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
    Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    Reviewed-by: Tim Deegan <tim@xxxxxxx>
    Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
    (cherry picked from commit b76ec3946bf6caca2c3950b857c008bc8db6723f)
---
 xen/arch/x86/domain.c           |  7 +++++++
 xen/arch/x86/mm/paging.c        |  2 ++
 xen/arch/x86/mm/shadow/common.c | 32 ++++++++++++++++++++++++++++++++
 xen/common/tasklet.c            |  5 +++++
 xen/include/asm-x86/domain.h    |  5 +++++
 xen/include/asm-x86/paging.h    |  4 ++++
 xen/include/asm-x86/shadow.h    | 32 ++++++++++++++++++++++++++++++++
 xen/include/xen/list.h          |  5 +++++
 8 files changed, 92 insertions(+)
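
The commit message notes that later patches will add the trigger which actually
invokes this infrastructure.  As a hedged sketch only (the hook name and the
vulnerability predicate, sketched again near the shadow.h hunk below, are
assumptions rather than code from this patch), the eventual hook on PV
pagetable writes is expected to look roughly like this:

    /* Hedged sketch, not part of this patch: a later hook on PV PTE writes
     * which latches the domain into shadow mode via the tasklet introduced
     * below. */
    static inline bool pv_l1tf_check_pte(struct domain *d, unsigned int level,
                                         intpte_t pte)
    {
        ASSERT(is_pv_domain(d));

        /* Only domains which opted in, and which aren't already shadowed. */
        if ( d->arch.pv_domain.check_l1tf && !paging_mode_sh_forced(d) &&
             pte_is_l1tf_vulnerable(pte, level) /* hypothetical predicate */ )
        {
            /* Defer the heavyweight mode change to the tasklet. */
            tasklet_schedule(&d->arch.paging.shadow.pv_l1tf_tasklet);
            return true;
        }

        return false;
    }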

diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 5d6622fc08..5058c984c3 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -67,6 +67,7 @@
 #include <compat/vcpu.h>
 #include <asm/psr.h>
 #include <asm/spec_ctrl.h>
+#include <asm/shadow.h>
 
 static __read_mostly enum {
     PCID_OFF,
@@ -647,6 +648,8 @@ int arch_domain_create(struct domain *d, unsigned int domcr_flags,
         rc = 0;
     else
     {
+        pv_l1tf_domain_init(d);
+
         d->arch.pv_domain.gdt_ldt_l1tab =
             alloc_xenheap_pages(0, MEMF_node(domain_to_node(d)));
         if ( !d->arch.pv_domain.gdt_ldt_l1tab )
@@ -792,6 +795,8 @@ int arch_domain_create(struct domain *d, unsigned int domcr_flags,
     free_perdomain_mappings(d);
     if ( is_pv_domain(d) )
     {
+        pv_l1tf_domain_destroy(d);
+
         xfree(d->arch.pv_domain.cpuidmasks);
         free_xenheap_page(d->arch.pv_domain.gdt_ldt_l1tab);
     }
@@ -815,6 +820,8 @@ void arch_domain_destroy(struct domain *d)
     free_perdomain_mappings(d);
     if ( is_pv_domain(d) )
     {
+        pv_l1tf_domain_destroy(d);
+
         free_xenheap_page(d->arch.pv_domain.gdt_ldt_l1tab);
         xfree(d->arch.pv_domain.cpuidmasks);
     }
diff --git a/xen/arch/x86/mm/paging.c b/xen/arch/x86/mm/paging.c
index b0037c400c..5a419d4d42 100644
--- a/xen/arch/x86/mm/paging.c
+++ b/xen/arch/x86/mm/paging.c
@@ -879,6 +879,8 @@ void paging_dump_domain_info(struct domain *d)
         printk("    paging assistance: ");
         if ( paging_mode_shadow(d) )
             printk("shadow ");
+        if ( paging_mode_sh_forced(d) )
+            printk("forced ");
         if ( paging_mode_hap(d) )
             printk("hap ");
         if ( paging_mode_refcounts(d) )
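
For illustration only: with this hunk applied, a PV domain which has been
forced into shadow mode shows up in the domain-info debug dump along these
lines (the line is assembled from the format strings above; any other paging
flags the domain has would follow):

    paging assistance: shadow forced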
diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index 82e130306c..cf7d89df07 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -3057,6 +3057,15 @@ static void sh_new_mode(struct domain *d, u32 new_mode)
     ASSERT(paging_locked_by_me(d));
     ASSERT(d != current->domain);
 
+    /*
+     * If PG_SH_forced has previously been activated because of writing an
+     * L1TF-vulnerable PTE, it must remain active for the remaining lifetime
+     * of the domain, even if the logdirty mode needs to be controlled for
+     * migration purposes.
+     */
+    if ( paging_mode_sh_forced(d) )
+        new_mode |= PG_SH_forced | PG_SH_enable;
+
     d->arch.paging.mode = new_mode;
     for_each_vcpu(d, v)
         sh_update_paging_modes(v);
@@ -3935,6 +3944,29 @@ void shadow_audit_tables(struct vcpu *v)
 
 #endif /* Shadow audit */
 
+void pv_l1tf_tasklet(unsigned long data)
+{
+    struct domain *d = (void *)data;
+
+    domain_pause(d);
+    paging_lock(d);
+
+    if ( !paging_mode_sh_forced(d) && !d->is_dying )
+    {
+        int ret = shadow_one_bit_enable(d, PG_SH_forced);
+
+        if ( ret )
+        {
+            printk(XENLOG_G_ERR "d%d Failed to enable PG_SH_forced: %d\n",
+                   d->domain_id, ret);
+            domain_crash(d);
+        }
+    }
+
+    paging_unlock(d);
+    domain_unpause(d);
+}
+
 /*
  * Local variables:
  * mode: C
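
The guard at the top of sh_new_mode() makes PG_SH_forced sticky.  A hedged,
standalone illustration using only the mode bits this series defines (not code
from the patch):

    /* Once PG_SH_forced is latched, any subsequent mode recalculation
     * re-ORs it (and PG_SH_enable) back in, so e.g. toggling log-dirty
     * for migration cannot drop the forced shadow state. */
    u32 mode     = PG_SH_enable | PG_SH_forced;   /* after pv_l1tf_tasklet() */
    u32 new_mode = PG_SH_enable | PG_log_dirty;   /* e.g. log-dirty request  */

    if ( mode & PG_SH_forced )                    /* mirrors sh_new_mode()   */
        new_mode |= PG_SH_forced | PG_SH_enable;

    /* new_mode still contains PG_SH_forced: the domain stays shadowed. */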
diff --git a/xen/common/tasklet.c b/xen/common/tasklet.c
index 365a7778c9..b9b8904d5b 100644
--- a/xen/common/tasklet.c
+++ b/xen/common/tasklet.c
@@ -152,6 +152,10 @@ void tasklet_kill(struct tasklet *t)
 
     spin_lock_irqsave(&tasklet_lock, flags);
 
+    /* Cope with uninitialised tasklets. */
+    if ( list_head_is_null(&t->list) )
+        goto unlock;
+
     if ( !list_empty(&t->list) )
     {
         BUG_ON(t->is_dead || t->is_running || (t->scheduled_on < 0));
@@ -168,6 +172,7 @@ void tasklet_kill(struct tasklet *t)
         spin_lock_irqsave(&tasklet_lock, flags);
     }
 
+ unlock:
     spin_unlock_irqrestore(&tasklet_lock, flags);
 }
 
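A hedged sketch of the new guarantee (illustrative only; some_fn is a
placeholder handler, not a real Xen function): tasklet_kill() is now a safe
no-op on a tasklet which is still all zeroes because tasklet_init() was never
reached, which is exactly the state the PV domain destruction paths can
encounter after an early failure in arch_domain_create():

    static void some_fn(unsigned long data) { }  /* placeholder handler */

    static struct tasklet t;  /* static, so zero-filled: t.list == {NULL, NULL} */

    static void example(void)
    {
        tasklet_kill(&t);     /* previously dereferenced the NULL list pointers;
                               * now caught by list_head_is_null() and skipped  */

        tasklet_init(&t, some_fn, 0);
        tasklet_kill(&t);     /* the normal teardown path is unchanged */
    }
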
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index 702ec64ac1..c2c9f029c8 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -121,6 +121,9 @@ struct shadow_domain {
 
     /* Has this domain ever used HVMOP_pagetable_dying? */
     bool_t pagetable_dying_op;
+
+    /* PV L1 Terminal Fault mitigation. */
+    struct tasklet pv_l1tf_tasklet;
 #endif
 };
 
@@ -257,6 +260,8 @@ struct pv_domain
     bool xpti;
     /* Use PCID feature? */
     bool pcid;
+    /* Mitigate L1TF with shadow/crashing? */
+    bool check_l1tf;
 
     /* map_domain_page() mapping cache. */
     struct mapcache_domain mapcache;
diff --git a/xen/include/asm-x86/paging.h b/xen/include/asm-x86/paging.h
index 5bce5e54a6..a94b8f6c9d 100644
--- a/xen/include/asm-x86/paging.h
+++ b/xen/include/asm-x86/paging.h
@@ -37,11 +37,14 @@
 
 #define PG_SH_shift    20
 #define PG_HAP_shift   21
+#define PG_SHF_shift   22
 /* We're in one of the shadow modes */
 #ifdef CONFIG_SHADOW_PAGING
 #define PG_SH_enable   (1U << PG_SH_shift)
+#define PG_SH_forced   (1U << PG_SHF_shift)
 #else
 #define PG_SH_enable   0
+#define PG_SH_forced   0
 #endif
 #define PG_HAP_enable  (1U << PG_HAP_shift)
 
@@ -62,6 +65,7 @@
 
 #define paging_mode_enabled(_d)   (!!(_d)->arch.paging.mode)
 #define paging_mode_shadow(_d)    (!!((_d)->arch.paging.mode & PG_SH_enable))
+#define paging_mode_sh_forced(_d) (!!((_d)->arch.paging.mode & PG_SH_forced))
 #define paging_mode_hap(_d)       (!!((_d)->arch.paging.mode & PG_HAP_enable))
 
 #define paging_mode_refcounts(_d) (!!((_d)->arch.paging.mode & PG_refcounts))
diff --git a/xen/include/asm-x86/shadow.h b/xen/include/asm-x86/shadow.h
index 7e1ed3b129..cbb226285b 100644
--- a/xen/include/asm-x86/shadow.h
+++ b/xen/include/asm-x86/shadow.h
@@ -29,6 +29,7 @@
 #include <asm/flushtlb.h>
 #include <asm/paging.h>
 #include <asm/p2m.h>
+#include <asm/spec_ctrl.h>
 
 /*****************************************************************************
  * Macros to tell which shadow paging mode a domain is in*/
@@ -114,6 +115,37 @@ static inline int shadow_domctl(struct domain *d, xen_domctl_shadow_op_t *sc,
 
 #endif /* CONFIG_SHADOW_PAGING */
 
+/*
+ * Mitigations for L1TF / CVE-2018-3620 for PV guests.
+ *
+ * We cannot alter an architecturally-legitimate PTE which a PV guest has
+ * chosen to write, as traditional paged-out metadata is L1TF-vulnerable.
+ * What we can do is force a PV guest which writes a vulnerable PTE into
+ * shadow mode, so Xen controls the pagetables which are reachable by the CPU
+ * pagewalk.
+ */
+
+void pv_l1tf_tasklet(unsigned long data);
+
+static inline void pv_l1tf_domain_init(struct domain *d)
+{
+    d->arch.pv_domain.check_l1tf =
+        opt_pv_l1tf & (is_hardware_domain(d)
+                       ? OPT_PV_L1TF_DOM0 : OPT_PV_L1TF_DOMU);
+
+#ifdef CONFIG_SHADOW_PAGING
+    tasklet_init(&d->arch.paging.shadow.pv_l1tf_tasklet,
+                 pv_l1tf_tasklet, (unsigned long)d);
+#endif
+}
+
+static inline void pv_l1tf_domain_destroy(struct domain *d)
+{
+#ifdef CONFIG_SHADOW_PAGING
+    tasklet_kill(&d->arch.paging.shadow.pv_l1tf_tasklet);
+#endif
+}
+
 /* Remove all shadows of the guest mfn. */
 static inline void shadow_remove_all_shadows(struct domain *d, mfn_t gmfn)
 {
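
The comment block above captures the design in prose.  To make the notion of a
vulnerable PTE concrete, here is a hedged, deliberately naive sketch of the
kind of predicate later patches in the series add (the name matches the
hypothetical hook sketched after the diffstat; this is not the series' actual
test):

    /* On affected CPUs, a not-present PTE still undergoes an L1D lookup,
     * so a not-present PTE whose address bits cover real, cacheable memory
     * can leak that memory's contents to the guest. */
    static inline bool pte_is_l1tf_vulnerable(intpte_t pte, unsigned int level)
    {
        /* Naive: treat any not-present PTE pointing below max_page as
         * vulnerable; 'level' is ignored in this simplification. */
        return !(pte & _PAGE_PRESENT) &&
               (((pte & PADDR_MASK) >> PAGE_SHIFT) < max_page);
    }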
diff --git a/xen/include/xen/list.h b/xen/include/xen/list.h
index fa07d720ee..1387abb211 100644
--- a/xen/include/xen/list.h
+++ b/xen/include/xen/list.h
@@ -51,6 +51,11 @@ static inline void INIT_LIST_HEAD(struct list_head *list)
     list->prev = list;
 }
 
+static inline bool list_head_is_null(const struct list_head *list)
+{
+    return !list->next && !list->prev;
+}
+
 /*
  * Insert a new entry between two known consecutive entries. 
  *
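
A hedged usage note on the new helper (illustrative, not from the patch):
list_head_is_null() identifies a head which was never initialised at all,
which is a distinct state from an initialised but empty list:

    static struct list_head a;                     /* zero-filled: { NULL, NULL } */
    static struct list_head b = LIST_HEAD_INIT(b); /* initialised and empty       */

    /* list_head_is_null(&a) is true;  list_empty() must not be used on a.  */
    /* list_head_is_null(&b) is false; list_empty(&b) is true.              */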
--
generated by git-patchbot for /home/xen/git/xen.git#stable-4.9
