[Xen-devel] [PATCH SpectreV1+L1TF v7 9/9] common/grant_table: block speculative out-of-bound accesses
Guests can issue grant table operations and provide guest-controlled
data to them. This data is then also used for memory loads. To avoid
speculative out-of-bound accesses, we use the array_index_nospec macro
where applicable. However, there are also memory accesses that cannot
be protected by a single array_index_nospec invocation, as well as
multiple accesses in a row. To protect these, a speculation barrier is
placed between the actual range check and the access via the
block_speculation macro.
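For illustration, the two patterns look roughly as follows (a minimal
sketch only; tbl, tbl_size, idx and use_entry() are made-up names and
not taken from this patch):

    /* Single access: clamp the index itself under speculative execution. */
    use_entry(&tbl[array_index_nospec(idx, tbl_size)]);

    /* Multiple dependent accesses: fence once after the range check. */
    if ( unlikely(idx >= tbl_size) )
        return GNTST_bad_gntref;
    /* Make sure the above bound check cannot be bypassed speculatively. */
    block_speculation();
    use_entry(&tbl[idx]);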
As the two grant table versions use structures of different sizes, and
version 2 additionally encodes the status in a separate array,
speculative execution might perform version 2 sized, out-of-bound
accesses while the table actually uses version 1. Hence, speculation is
blocked whenever a memory access depends on the grant table version.
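Concretely, version dependent code either sits behind a fence, as in
shared_entry_header() and nr_grant_entries() below, or evaluates the
version check itself under a fence via evaluate_nospec(), as in this
excerpt from map_grant_ref() in the diff:

    /* Make sure we do not access memory speculatively */
    status = evaluate_nospec(rgt->gt_version == 1) ? &shah->flags
                                                   : &status_entry(rgt, op->ref);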
This is part of the speculative hardening effort.
Signed-off-by: Norbert Manthey <nmanthey@xxxxxxxxx>
---
Notes:
v7: mention speculative hardening in commit message
add spaces for arithmetic
introduce switch and block_speculation macro in shared_entry_header
introduce block_speculation macro in nr_grant_entries
use block_speculation instead of array_index_nospec(..., nr_grant_entries)
as nr_grant_entries now comes with an lfence instruction
cache the result of nr_grant_entries when it is used in loops, to avoid
repeated lfences (see the sketch after these notes)
use lfence in shared_entry_header to ensure bound checks
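As a brief sketch of the loop hoisting mentioned above (names as used in
the diff below), the fenced bound is evaluated once before the loop
instead of on every iteration:

    unsigned int gt_nr_grant_entries = nr_grant_entries(gt);

    for ( ref = 0; ref != gt_nr_grant_entries; ref++ )
    {
        /* per-entry work, without an lfence per iteration */
    }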
xen/common/grant_table.c | 78 +++++++++++++++++++++++++++++++++++++-----------
1 file changed, 61 insertions(+), 17 deletions(-)
diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c
--- a/xen/common/grant_table.c
+++ b/xen/common/grant_table.c
@@ -37,6 +37,7 @@
#include <xen/paging.h>
#include <xen/keyhandler.h>
#include <xen/vmap.h>
+#include <xen/nospec.h>
#include <xsm/xsm.h>
#include <asm/flushtlb.h>
@@ -203,8 +204,9 @@ static inline unsigned int nr_status_frames(const struct grant_table *gt)
}
#define MAPTRACK_PER_PAGE (PAGE_SIZE / sizeof(struct grant_mapping))
-#define maptrack_entry(t, e) \
- ((t)->maptrack[(e)/MAPTRACK_PER_PAGE][(e)%MAPTRACK_PER_PAGE])
+#define maptrack_entry(t, e) \
+    ((t)->maptrack[array_index_nospec(e, (t)->maptrack_limit) / \
+                   MAPTRACK_PER_PAGE][(e) % MAPTRACK_PER_PAGE])
static inline unsigned int
nr_maptrack_frames(struct grant_table *t)
@@ -226,10 +228,18 @@ nr_maptrack_frames(struct grant_table *t)
static grant_entry_header_t *
shared_entry_header(struct grant_table *t, grant_ref_t ref)
{
- if ( t->gt_version == 1 )
+ switch ( t->gt_version )
+ {
+ case 1:
+ /* Make sure we return a value independently of speculative execution */
+ block_speculation();
return (grant_entry_header_t*)&shared_entry_v1(t, ref);
- else
+ case 2:
+ /* Make sure we return a value independently of speculative execution */
+ block_speculation();
return &shared_entry_v2(t, ref).hdr;
+ }
+ return NULL;
}
/* Active grant entry - used for shadowing GTF_permit_access grants. */
@@ -634,10 +644,16 @@ static unsigned int nr_grant_entries(struct grant_table *gt)
case 1:
BUILD_BUG_ON(f2e(INITIAL_NR_GRANT_FRAMES, 1) <
GNTTAB_NR_RESERVED_ENTRIES);
+
+ /* Make sure we return a value independently of speculative execution */
+ block_speculation();
return f2e(nr_grant_frames(gt), 1);
case 2:
BUILD_BUG_ON(f2e(INITIAL_NR_GRANT_FRAMES, 2) <
GNTTAB_NR_RESERVED_ENTRIES);
+
+ /* Make sure we return a value independently of speculative execution */
+ block_speculation();
return f2e(nr_grant_frames(gt), 2);
#undef f2e
}
@@ -963,9 +979,15 @@ map_grant_ref(
PIN_FAIL(unlock_out, GNTST_bad_gntref, "Bad ref %#x for d%d\n",
op->ref, rgt->domain->domain_id);
+ /* Make sure the above bound check cannot be bypassed speculatively */
+ block_speculation();
+
act = active_entry_acquire(rgt, op->ref);
shah = shared_entry_header(rgt, op->ref);
- status = rgt->gt_version == 1 ? &shah->flags : &status_entry(rgt, op->ref);
+
+ /* Make sure we do not access memory speculatively */
+ status = evaluate_nospec(rgt->gt_version == 1) ? &shah->flags
+ : &status_entry(rgt, op->ref);
/* If already pinned, check the active domid and avoid refcnt overflow. */
if ( act->pin &&
@@ -987,7 +1009,7 @@ map_grant_ref(
if ( !act->pin )
{
- unsigned long gfn = rgt->gt_version == 1 ?
+ unsigned long gfn = evaluate_nospec(rgt->gt_version == 1) ?
shared_entry_v1(rgt, op->ref).frame :
shared_entry_v2(rgt, op->ref).full_page.frame;
@@ -1321,6 +1343,9 @@ unmap_common(
goto unlock_out;
}
+ /* Make sure the above bound check cannot be bypassed speculatively */
+ block_speculation();
+
act = active_entry_acquire(rgt, op->ref);
/*
@@ -1418,7 +1443,7 @@ unmap_common_complete(struct gnttab_unmap_common *op)
struct page_info *pg;
uint16_t *status;
- if ( !op->done )
+ if ( evaluate_nospec(!op->done) )
{
/* unmap_common() didn't do anything - nothing to complete. */
return;
@@ -2026,6 +2051,7 @@ gnttab_prepare_for_transfer(
goto fail;
}
+ /* This call ensures the above check cannot be bypassed speculatively */
sha = shared_entry_header(rgt, ref);
scombo.word = *(u32 *)&sha->flags;
@@ -2223,7 +2249,11 @@ gnttab_transfer(
okay = gnttab_prepare_for_transfer(e, d, gop.ref);
spin_lock(&e->page_alloc_lock);
- if ( unlikely(!okay) || unlikely(e->is_dying) )
+ /*
+ * Make sure the reference bound check in gnttab_prepare_for_transfer
+ * is respected and speculative execution is blocked accordingly
+ */
+ if ( unlikely(!evaluate_nospec(okay)) || unlikely(e->is_dying) )
{
bool_t drop_dom_ref = !domain_adjust_tot_pages(e, -1);
@@ -2253,7 +2283,7 @@ gnttab_transfer(
grant_read_lock(e->grant_table);
act = active_entry_acquire(e->grant_table, gop.ref);
- if ( e->grant_table->gt_version == 1 )
+ if ( evaluate_nospec(e->grant_table->gt_version == 1) )
{
grant_entry_v1_t *sha = &shared_entry_v1(e->grant_table, gop.ref);
@@ -2408,9 +2438,11 @@ acquire_grant_for_copy(
PIN_FAIL(gt_unlock_out, GNTST_bad_gntref,
"Bad grant reference %#x\n", gref);
- act = active_entry_acquire(rgt, gref);
+ /* This call makes sure the above check is not bypassed speculatively */
shah = shared_entry_header(rgt, gref);
- if ( rgt->gt_version == 1 )
+ act = active_entry_acquire(rgt, gref);
+
+ if ( evaluate_nospec(rgt->gt_version == 1) )
{
sha2 = NULL;
status = &shah->flags;
@@ -2826,6 +2858,9 @@ static int gnttab_copy_buf(const struct gnttab_copy *op,
op->dest.offset, dest->ptr.offset,
op->len, dest->len);
+ /* Make sure the above checks are not bypassed speculatively */
+ block_speculation();
+
memcpy(dest->virt + op->dest.offset, src->virt + op->source.offset,
op->len);
gnttab_mark_dirty(dest->domain, dest->mfn);
@@ -2946,6 +2981,7 @@ gnttab_set_version(XEN_GUEST_HANDLE_PARAM(gnttab_set_version_t) uop)
grant_entry_v1_t reserved_entries[GNTTAB_NR_RESERVED_ENTRIES];
int res;
unsigned int i;
+ unsigned int gt_nr_grant_entries;
if ( copy_from_guest(&op, uop, 1) )
return -EFAULT;
@@ -2969,7 +3005,8 @@ gnttab_set_version(XEN_GUEST_HANDLE_PARAM(gnttab_set_version_t) uop)
* are allowed to be in use (xenstore/xenconsole keeps them mapped).
* (You need to change the version number for e.g. kexec.)
*/
- for ( i = GNTTAB_NR_RESERVED_ENTRIES; i < nr_grant_entries(gt); i++ )
+ gt_nr_grant_entries = nr_grant_entries(gt);
+ for ( i = GNTTAB_NR_RESERVED_ENTRIES; i < gt_nr_grant_entries; i++ )
{
if ( read_atomic(&_active_entry(gt, i).pin) != 0 )
{
@@ -3211,6 +3248,9 @@ swap_grant_ref(grant_ref_t ref_a, grant_ref_t ref_b)
if ( unlikely(ref_b >= nr_grant_entries(d->grant_table)))
PIN_FAIL(out, GNTST_bad_gntref, "Bad ref-b %#x\n", ref_b);
+ /* Make sure the above checks are not bypassed speculatively */
+ block_speculation();
+
/* Swapping the same ref is a no-op. */
if ( ref_a == ref_b )
goto out;
@@ -3223,7 +3263,7 @@ swap_grant_ref(grant_ref_t ref_a, grant_ref_t ref_b)
if ( act_b->pin )
PIN_FAIL(out, GNTST_eagain, "ref b %#x busy\n", ref_b);
- if ( gt->gt_version == 1 )
+ if ( evaluate_nospec(gt->gt_version == 1) )
{
grant_entry_v1_t shared;
@@ -3681,12 +3721,14 @@ void grant_table_warn_active_grants(struct domain *d)
struct active_grant_entry *act;
grant_ref_t ref;
unsigned int nr_active = 0;
+ unsigned int gt_nr_grant_entries;
#define WARN_GRANT_MAX 10
grant_read_lock(gt);
- for ( ref = 0; ref != nr_grant_entries(gt); ref++ )
+ gt_nr_grant_entries = nr_grant_entries(gt);
+ for ( ref = 0; ref != gt_nr_grant_entries; ref++ )
{
act = active_entry_acquire(gt, ref);
if ( !act->pin )
@@ -3771,7 +3813,7 @@ int mem_sharing_gref_to_gfn(struct grant_table *gt, grant_ref_t ref,
rc = -EINVAL;
else if ( ref >= nr_grant_entries(gt) )
rc = -ENOENT;
- else if ( gt->gt_version == 1 )
+ else if ( evaluate_nospec(gt->gt_version == 1) )
{
const grant_entry_v1_t *sha1 = &shared_entry_v1(gt, ref);
@@ -3793,7 +3835,7 @@ int mem_sharing_gref_to_gfn(struct grant_table *gt, grant_ref_t ref,
rc = -ENXIO;
else if ( !rc && status )
{
- if ( gt->gt_version == 1 )
+ if ( evaluate_nospec(gt->gt_version == 1) )
*status = flags;
else
*status = status_entry(gt, ref);
@@ -3935,6 +3977,7 @@ static void gnttab_usage_print(struct domain *rd)
int first = 1;
grant_ref_t ref;
struct grant_table *gt = rd->grant_table;
+ unsigned int gt_nr_grant_entries;
printk(" -------- active -------- -------- shared --------\n");
printk("[ref] localdom mfn pin localdom gmfn flags\n");
@@ -3947,7 +3990,8 @@ static void gnttab_usage_print(struct domain *rd)
nr_grant_frames(gt), gt->max_grant_frames,
nr_maptrack_frames(gt), gt->max_maptrack_frames);
- for ( ref = 0; ref != nr_grant_entries(gt); ref++ )
+ gt_nr_grant_entries = nr_grant_entries(gt);
+ for ( ref = 0; ref != gt_nr_grant_entries; ref++ )
{
struct active_grant_entry *act;
struct grant_entry_header *sha;
--
2.7.4